Preparation

Libs and parameters

# Libraries
library(ggplot2)
library(assortedRFunctions)
library(cowplot) 
library(stringr)
library(plyr)
library(mgcv)
library(foreach)
library(doParallel)
library(BayesFactor)
library(ciftiTools)
library(viridis)
library(tidybayes)
library(bayesplot)
library(brms)
#library(lmerTest)
library(data.table)
library(knitr)
#library(caTools) 
#library(e1071) 
library(readxl)
library(ggridges)
library(ggforce)
library(marginaleffects)
library(gghalves)
library(effectsize)

# For SVM
library(caTools) 
library(e1071) 
Some settings that differ between the computers that I work on.
# Use correct locations and other settings based on computer
# The hostname is looked up once, by name ("nodename"), instead of repeating
# the positional Sys.info()[4] lookup in every branch — positional indexing
# would silently break if Sys.info()'s element order ever changed.
hostname <- Sys.info()[["nodename"]]

if(hostname == "DESKTOP-335I26I"){
  # Work laptop (Windows)
  ## Setting paths to workbench installation
  ciftiTools.setOption("wb_path", "C:/Program Files/workbench-windows64-v1.5.0/workbench/bin_windows64")
  
  ## Path to the imaging data
  path2imaging_results2 <- "D:/Seafile/imaging_results"
  
} else if(hostname == 'DESKTOP-91CQCSQ') {
  # Work desktop (Windows)
  ## Setting paths to workbench installation
  ciftiTools.setOption("wb_path", "D:/workbench/bin_windows64")
  
  ## Path to the imaging data
  path2imaging_results2 <- "D:/imaging_results"
} else if(hostname == 'alex-Zenbook-UX3404VA-UX3404VA') {
  # Work laptop (Linux)
  ## Setting paths to workbench installation
  ciftiTools.setOption("wb_path", "/usr/bin/wb_command")
  
  ## Path to the imaging data
  path2imaging_results2 <- "/media/alex/shared/Seafile/imaging_results"
  
} else if(hostname == "greengoblin"){
  # Main desktop PC (Linux)
  ciftiTools.setOption("wb_path", "/usr/bin/wb_command") 
  path2imaging_results2 <- "/media/alex/work/Seafile/imaging_results" 
} else if(hostname == "GREEN-GOBLIN-WI"){
  # Main desktop PC (Windows)
  ciftiTools.setOption("wb_path", "C:/Program Files/workbench-windows64-v2.0.1/workbench/bin_windows64")
  path2imaging_results2 <- "E:/Seafile/imaging_results" 
} else {
  # Personal laptop (Windows) — fallback when no known hostname matches
  ## Setting paths to workbench installation
  ciftiTools.setOption("wb_path", "D:/Program Files/workbench/bin_windows64")
  
  ## Path to the imaging data
  path2imaging_results2 <- "D:/OLM/imaging_results"
}

# Seed for reproducibility of everything below
set.seed(20240205)
Click here for detailed session information.
sessioninfo::session_info()
## ─ Session info ───────────────────────────────────────────────────────────────
##  setting  value
##  version  R version 4.4.0 (2024-04-24)
##  os       Ubuntu 22.04.5 LTS
##  system   x86_64, linux-gnu
##  ui       X11
##  language en_GB:en
##  collate  en_GB.UTF-8
##  ctype    en_GB.UTF-8
##  tz       Asia/Shanghai
##  date     2025-04-09
##  pandoc   3.1.1 @ /usr/lib/rstudio/resources/app/bin/quarto/bin/tools/ (via rmarkdown)
## 
## ─ Packages ───────────────────────────────────────────────────────────────────
##  package            * version    date (UTC) lib source
##  abind                1.4-5      2016-07-21 [1] CRAN (R 4.4.0)
##  arrayhelpers         1.1-0      2020-02-04 [1] CRAN (R 4.4.0)
##  assortedRFunctions * 0.0.1      2025-01-06 [1] Github (JAQuent/assortedrFunctions@b887289)
##  backports            1.5.0      2024-05-23 [1] CRAN (R 4.4.0)
##  base64enc            0.1-3      2015-07-28 [1] CRAN (R 4.4.0)
##  BayesFactor        * 0.9.12-4.7 2024-01-24 [1] CRAN (R 4.4.0)
##  bayesplot          * 1.11.1     2024-02-15 [1] CRAN (R 4.4.0)
##  bayestestR           0.13.2     2024-02-12 [1] CRAN (R 4.4.0)
##  bitops               1.0-7      2021-04-24 [1] CRAN (R 4.4.0)
##  bridgesampling       1.1-2      2021-04-16 [1] CRAN (R 4.4.0)
##  brms               * 2.21.0     2024-03-20 [1] CRAN (R 4.4.0)
##  Brobdingnag          1.2-9      2022-10-19 [1] CRAN (R 4.4.0)
##  bslib                0.7.0      2024-03-29 [1] CRAN (R 4.4.0)
##  cachem               1.1.0      2024-05-16 [1] CRAN (R 4.4.0)
##  caTools            * 1.18.2     2021-03-28 [1] CRAN (R 4.4.0)
##  cellranger           1.1.0      2016-07-27 [1] CRAN (R 4.4.0)
##  checkmate            2.3.1      2023-12-04 [1] CRAN (R 4.4.0)
##  ciftiTools         * 0.15.1     2024-06-25 [1] CRAN (R 4.4.0)
##  class                7.3-20     2022-01-13 [4] CRAN (R 4.1.2)
##  cli                  3.6.2      2023-12-11 [1] CRAN (R 4.4.0)
##  coda               * 0.19-4.1   2024-01-31 [1] CRAN (R 4.4.0)
##  codetools            0.2-18     2020-11-04 [4] CRAN (R 4.0.3)
##  colorspace           2.1-0      2023-01-23 [1] CRAN (R 4.4.0)
##  cowplot            * 1.1.3      2024-01-22 [1] CRAN (R 4.4.0)
##  data.table         * 1.15.4     2024-03-30 [1] CRAN (R 4.4.0)
##  datawizard           0.11.0     2024-06-05 [1] CRAN (R 4.4.0)
##  digest               0.6.35     2024-03-11 [1] CRAN (R 4.4.0)
##  distributional       0.4.0      2024-02-07 [1] CRAN (R 4.4.0)
##  doParallel         * 1.0.17     2022-02-07 [1] CRAN (R 4.4.0)
##  dplyr                1.1.4      2023-11-17 [1] CRAN (R 4.4.0)
##  e1071              * 1.7-14     2023-12-06 [1] CRAN (R 4.4.0)
##  effectsize         * 0.8.8      2024-05-12 [1] CRAN (R 4.4.0)
##  emmeans              1.10.4     2024-08-21 [1] CRAN (R 4.4.0)
##  estimability         1.5.1      2024-05-12 [1] CRAN (R 4.4.0)
##  evaluate             0.23       2023-11-01 [1] CRAN (R 4.4.0)
##  fansi                1.0.6      2023-12-08 [1] CRAN (R 4.4.0)
##  farver               2.1.2      2024-05-13 [1] CRAN (R 4.4.0)
##  fastmap              1.2.0      2024-05-15 [1] CRAN (R 4.4.0)
##  foreach            * 1.5.2      2022-02-02 [1] CRAN (R 4.4.0)
##  generics             0.1.3      2022-07-05 [1] CRAN (R 4.4.0)
##  ggdist               3.3.2      2024-03-05 [1] CRAN (R 4.4.0)
##  ggforce            * 0.4.2      2024-02-19 [1] CRAN (R 4.4.0)
##  gghalves           * 0.1.4      2022-11-20 [1] CRAN (R 4.4.0)
##  ggplot2            * 3.5.1      2024-04-23 [1] CRAN (R 4.4.0)
##  ggridges           * 0.5.6      2024-01-23 [1] CRAN (R 4.4.0)
##  gifti                0.8.0      2020-11-11 [1] CRAN (R 4.4.0)
##  glue                 1.7.0      2024-01-09 [1] CRAN (R 4.4.0)
##  gridExtra            2.3        2017-09-09 [1] CRAN (R 4.4.0)
##  gtable               0.3.5      2024-04-22 [1] CRAN (R 4.4.0)
##  htmltools            0.5.8.1    2024-04-04 [1] CRAN (R 4.4.0)
##  inline               0.3.19     2021-05-31 [1] CRAN (R 4.4.0)
##  insight              0.20.0     2024-06-04 [1] CRAN (R 4.4.0)
##  iterators          * 1.0.14     2022-02-05 [1] CRAN (R 4.4.0)
##  jquerylib            0.1.4      2021-04-26 [1] CRAN (R 4.4.0)
##  jsonlite             1.8.8      2023-12-04 [1] CRAN (R 4.4.0)
##  knitr              * 1.47       2024-05-29 [1] CRAN (R 4.4.0)
##  lattice              0.20-45    2021-09-22 [4] CRAN (R 4.1.1)
##  lifecycle            1.0.4      2023-11-07 [1] CRAN (R 4.4.0)
##  loo                  2.7.0      2024-02-24 [1] CRAN (R 4.4.0)
##  magrittr             2.0.3      2022-03-30 [1] CRAN (R 4.4.0)
##  marginaleffects    * 0.21.0     2024-06-14 [1] CRAN (R 4.4.0)
##  MASS                 7.3-64     2025-01-04 [1] CRAN (R 4.4.0)
##  Matrix             * 1.7-0      2024-04-26 [1] CRAN (R 4.4.0)
##  MatrixModels         0.5-3      2023-11-06 [1] CRAN (R 4.4.0)
##  matrixStats          1.3.0      2024-04-11 [1] CRAN (R 4.4.0)
##  mgcv               * 1.8-39     2022-02-24 [4] CRAN (R 4.1.2)
##  multcomp             1.4-26     2024-07-18 [1] CRAN (R 4.4.0)
##  munsell              0.5.1      2024-04-01 [1] CRAN (R 4.4.0)
##  mvtnorm              1.2-5      2024-05-21 [1] CRAN (R 4.4.0)
##  nlme               * 3.1-155    2022-01-13 [4] CRAN (R 4.1.2)
##  oro.nifti            0.11.4     2022-08-10 [1] CRAN (R 4.4.0)
##  parameters           0.21.7     2024-05-14 [1] CRAN (R 4.4.0)
##  pbapply              1.7-2      2023-06-27 [1] CRAN (R 4.4.0)
##  pillar               1.9.0      2023-03-22 [1] CRAN (R 4.4.0)
##  pkgbuild             1.4.4      2024-03-17 [1] CRAN (R 4.4.0)
##  pkgconfig            2.0.3      2019-09-22 [1] CRAN (R 4.4.0)
##  plyr               * 1.8.9      2023-10-02 [1] CRAN (R 4.4.0)
##  polyclip             1.10-6     2023-09-27 [1] CRAN (R 4.4.0)
##  posterior            1.5.0      2023-10-31 [1] CRAN (R 4.4.0)
##  proxy                0.4-27     2022-06-09 [1] CRAN (R 4.4.0)
##  purrr                1.0.2      2023-08-10 [1] CRAN (R 4.4.0)
##  QuickJSR             1.2.2      2024-06-07 [1] CRAN (R 4.4.0)
##  R.methodsS3          1.8.2      2022-06-13 [1] CRAN (R 4.4.0)
##  R.oo                 1.26.0     2024-01-24 [1] CRAN (R 4.4.0)
##  R.utils              2.12.3     2023-11-18 [1] CRAN (R 4.4.0)
##  R6                   2.5.1      2021-08-19 [1] CRAN (R 4.4.0)
##  RColorBrewer         1.1-3      2022-04-03 [1] CRAN (R 4.4.0)
##  Rcpp               * 1.0.12     2024-01-09 [1] CRAN (R 4.4.0)
##  RcppParallel         5.1.7      2023-02-27 [1] CRAN (R 4.4.0)
##  readxl             * 1.4.3      2023-07-06 [1] CRAN (R 4.4.0)
##  rlang                1.1.4      2024-06-04 [1] CRAN (R 4.4.0)
##  rmarkdown            2.27       2024-05-17 [1] CRAN (R 4.4.0)
##  RNifti               1.6.1      2024-03-07 [1] CRAN (R 4.4.0)
##  rstan                2.32.6     2024-03-05 [1] CRAN (R 4.4.0)
##  rstantools           2.4.0      2024-01-31 [1] CRAN (R 4.4.0)
##  rstudioapi           0.16.0     2024-03-24 [1] CRAN (R 4.4.0)
##  sandwich             3.1-1      2024-09-15 [1] CRAN (R 4.4.0)
##  sass                 0.4.9      2024-03-15 [1] CRAN (R 4.4.0)
##  scales               1.3.0      2023-11-28 [1] CRAN (R 4.4.0)
##  sessioninfo          1.2.2      2021-12-06 [1] CRAN (R 4.4.0)
##  StanHeaders          2.32.9     2024-05-29 [1] CRAN (R 4.4.0)
##  stringi              1.8.4      2024-05-06 [1] CRAN (R 4.4.0)
##  stringr            * 1.5.1      2023-11-14 [1] CRAN (R 4.4.0)
##  survival             3.2-13     2021-08-24 [4] CRAN (R 4.1.1)
##  svUnit               1.0.6      2021-04-19 [1] CRAN (R 4.4.0)
##  tensorA              0.36.2.1   2023-12-13 [1] CRAN (R 4.4.0)
##  TH.data              1.1-2      2023-04-17 [1] CRAN (R 4.4.0)
##  tibble               3.2.1      2023-03-20 [1] CRAN (R 4.4.0)
##  tidybayes          * 3.0.6      2023-08-12 [1] CRAN (R 4.4.0)
##  tidyr                1.3.1      2024-01-24 [1] CRAN (R 4.4.0)
##  tidyselect           1.2.1      2024-03-11 [1] CRAN (R 4.4.0)
##  tweenr               2.0.3      2024-02-26 [1] CRAN (R 4.4.0)
##  utf8                 1.2.4      2023-10-22 [1] CRAN (R 4.4.0)
##  vctrs                0.6.5      2023-12-01 [1] CRAN (R 4.4.0)
##  viridis            * 0.6.5      2024-01-29 [1] CRAN (R 4.4.0)
##  viridisLite        * 0.4.2      2023-05-02 [1] CRAN (R 4.4.0)
##  withr                3.0.0      2024-01-16 [1] CRAN (R 4.4.0)
##  xfun                 0.44       2024-05-15 [1] CRAN (R 4.4.0)
##  xml2                 1.3.6      2023-12-04 [1] CRAN (R 4.4.0)
##  xtable               1.8-4      2019-04-21 [1] CRAN (R 4.4.0)
##  yaml                 2.3.8      2023-12-11 [1] CRAN (R 4.4.0)
##  zoo                  1.8-12     2023-04-13 [1] CRAN (R 4.4.0)
## 
##  [1] /home/alex/R/x86_64-pc-linux-gnu-library/4.4
##  [2] /usr/local/lib/R/site-library
##  [3] /usr/lib/R/site-library
##  [4] /usr/lib/R/library
## 
## ──────────────────────────────────────────────────────────────────────────────
Click here for chunk for statistical reporting parameters.
# Significance cut off on the -log10(p) scale
cutOff <- 1.301 # Because of -log(0.05, 10)

# Parameters how to report means
report_type   <- 1        # reporting style passed to the reporting helpers
digits1       <- 2        # digits used when reporting descriptive statistics
rounding_type <- "signif" # "signif" or "round"; also used by calc_axis_limits()

# Font sizes
font_size   <- 8 # base font size (pt) for all ggplot themes below

# Base ggplot theme: classic look, centred title, transparent backgrounds
# so panels can be composed into multi-panel figures without white boxes
base_theme  <- theme_classic(base_size = font_size) + 
               theme(plot.title = element_text(hjust = 0.5),
                     plot.background = element_blank(), 
                     panel.background = element_rect(fill = "transparent"))

# Variant of the base theme with white facet strips (for facetted plots)
base_theme2  <- theme_classic(base_size = font_size) + 
               theme(plot.title = element_text(hjust = 0.5),
                     plot.background = element_blank(), 
                     panel.background = element_rect(fill = "transparent"),
                     text = element_text(family = ""),
                     strip.background = element_rect(color="white", fill="white"))

# Parameters for saving figures
## Information: https://www.nature.com/ncomms/submit/how-to-submit
### PDF page: 210 x 276 mm
### Column widths in mm
single_column <- 88
double_column <- 180
dpi           <- 1000
figurePath    <- "figures/SpaNov/"
axisExpand  <- 0.05 # proportion by which axis limits are expanded beyond the data range

# Function to calculate axis limits
# Compute axis limits and three breaks (min, midpoint, max) for a value range
#
# Args:
#   x             Numeric vector whose range defines the axis.
#   axisExpand    Proportion by which each extreme is pushed outwards
#                 (relative to its own magnitude).
#   digits        Digits used when rounding the breaks.
#   rounding_type Either "signif" or "round".
#
# Returns a list with `limits` (length 2) and `breaks` (length 3).
calc_axis_limits <- function(x, axisExpand, digits = 2, rounding_type = "signif"){
  # Empirical extremes of the data
  val_range <- range(x, na.rm = TRUE)
  lo <- val_range[1]
  hi <- val_range[2]
  
  # Push each extreme outwards by a proportion of its own magnitude
  # (subtracting/adding abs() handles the negative and positive cases
  # uniformly); the middle break sits halfway between the raw extremes.
  axis_breaks <- c(lo - abs(lo) * axisExpand,
                   mean(val_range),
                   hi + abs(hi) * axisExpand)
  
  # Round the breaks with the requested method
  rounder <- switch(rounding_type,
                    signif = signif,
                    round  = round,
                    stop("Wrong rounding_type. Choose signif or round"))
  axis_breaks <- rounder(axis_breaks, digits)
  
  # Limits are the outer two breaks
  list(limits = axis_breaks[c(1, 3)], breaks = axis_breaks)
}

# Colours used for visualisation
meanLineColour       <- "red"      # colour of mean lines in plots
boxplot_borderColour <- "black"    # border colour of box plots
boxplot_pointColour  <- "darkgrey" # colour of individual data points
baseColours          <- c("#7091C0", "#4A66AC", "#364875") # main blue palette (light to dark)
spectral_colours     <- c("#5956a5", "#a60a44") # two-colour endpoint palette
cool_warm_colours    <- c("#3C4DC1", "#B70B28") # cool/warm endpoint colours
novFam_gradient      <- viridis(n = 6, option = "H", direction = -1) # 6-step gradient (viridis option "H", reversed)

# Information for Yeo 7
## Names etc. for the networks in Yeo 7
Yeo7_fullNames <- c("Frontoparietal", "Default", "Dorsal Attention", "Limbic", 
                    "Ventral Attention", "Somatomotor", "Visual")
## Abbreviations in the same order as Yeo7_fullNames
Yeo7_abbr      <- c("Cont", "Default", "DorsAttn", "Limbic", "SalVentAttn", "SomMot", "Vis")

## Colours used for Yeo 7 (same order as the names above)
Yeo7_colours <- c("#E79523", "#CD3E4E", "#00760F", "#DCF8A4", "#C43BFA", "#4682B4", "#781286")

CIFTI files

Click here for support CIFTI files etc.
# Loading the support CIFTI files
## Place where to find some of the CIFTI files and parcellations
CIFTI_locations <- "data/ignore_fMRI_version1/sourceFiles/"

## CIFTI files (parcellations and inflated surfaces, names as distributed)
parcellationFile <- "Q1-Q6_RelatedValidation210.CorticalAreas_dil_Final_Final_Areas_Group_Colors_with_Atlas_ROIs2.32k_fs_LR.dlabel.nii"
CAB_NP           <- "CortexSubcortex_ColeAnticevic_NetPartition_wSubcorGSR_netassignments_LR.dlabel.nii"
surfLeft         <- "S1200.L.inflated_MSMAll.32k_fs_LR.surf.gii"
surfRight        <- "S1200.R.inflated_MSMAll.32k_fs_LR.surf.gii"

## Combine CIFTI_locations with file name (variables hold full paths from here on)
parcellationFile <- paste0(CIFTI_locations, parcellationFile)
CAB_NP           <- paste0(CIFTI_locations, CAB_NP)
surfLeft         <- paste0(CIFTI_locations, surfLeft)
surfRight        <- paste0(CIFTI_locations, surfRight)

## Loading CIFTIs via ciftiTools as xifti objects
### Get MMP parcellation (all brain structures, incl. subcortex)
MMP_xii <- ciftiTools::read_cifti(parcellationFile,
                                  brainstructures = "all", 
                                  surfL_fname = surfLeft, 
                                  surfR_fname = surfRight)

### Load Yeo 7 parcellation
Yeo7_xii <- ciftiTools::read_cifti("other_stuff/Yeo7.dlabel.nii",
                                  surfL_fname = surfLeft, 
                                  surfR_fname = surfRight)

## Load other stuff
### Load the parcel names for MMP (the .csv has no header row)
parcel_names <- read.csv("data/ignore_fMRI_version1/extracted_values/Parcellations/MP1.0_210V_parcel_names.csv", header = FALSE)

### Load the extracted MNI coordinates for the subcortical CIFTI structures
MNI_coord <- read.csv("other_stuff/cifti_subcortical_MNI152_coordinates.csv")

### Load the hippocampal projection values
load("other_stuff/projected_HC.RData")

Loading and preparing the data

Click here for loading data.
# Load the data
## Specify paths where the data is saved
path2data <- "data/ignore_fMRI_version1/"
EV_folder <- "ignore_eventTable2/"

## Load the look-up table that contains information of R-numbers which are retracted 
lookupTable  <- read.csv(paste0(path2data, "lookUpTable.csv"))

## Load the .RData image that was used to calculate the events
## NOTE(review): presumably this provides positionData/positionData2 used
## below — confirm against the image's contents.
load("ignore_eventTable3/images/SpaNov_event_file_contPM.RData")

## Load .RData images of the combined data (all subject in one DF)
load(paste0(path2data, "combined_data/demographics.RData"))
load(paste0(path2data, "combined_data/DW_all_data.RData"))
load(paste0(path2data, "combined_data/OLM_7T_all_data.RData"))
load(paste0(path2data, "combined_data/OLM_3T_all_data.RData"))
load(paste0(path2data, "combined_data/question_data.RData"))

# Select the subjects included in this analysis
## Load the subjects that are included in this analysis (comma-separated R-numbers)
subjectFile <- readLines(paste0(path2data, "SpaNov_subject2analyse.txt"))
subjIDs_R   <- str_split(subjectFile, pattern = ",")[[1]] 
subjIDs     <- lookupTable$anonKey[lookupTable$Rnum %in% subjIDs_R]
# Important note: subjIDs_R do not have the same order as subjIDs!!!!!!!!!!!!!

## Subset to data that is being included in the analysis
## (note: logEntries uses the column "ppid" while the others use "subject")
OLM_7T_position_data <- OLM_7T_position_data[OLM_7T_position_data$subject %in% subjIDs, ]
demographics         <- demographics[demographics$subject %in% subjIDs, ]
DW_position_data     <- DW_position_data[DW_position_data$subject %in% subjIDs, ]
OLM_7T_logEntries    <- OLM_7T_logEntries[OLM_7T_logEntries$ppid %in% subjIDs, ]
OLM_7T_trial_results <- OLM_7T_trial_results[OLM_7T_trial_results$subject %in% subjIDs, ]
question_data        <- question_data[question_data$subject %in% subjIDs, ]

## Subset to retrieval only
OLM_7T_retrieval <- OLM_7T_trial_results[OLM_7T_trial_results$trialType == "retrieval", ]

# Exclude control trials
# NOTE(review): positionData2 is expected to come from the .RData image
# loaded above — verify it is defined there.
no_control <- positionData2[positionData2$trialType != "control", ]

# Include only Run 1, 2 and 3 because the last retrieval run 
# doesn't count as far as this analysis is concerned
no_control <- no_control[no_control$run %in% 1:3, ]

# Get the object locations to verify object placement in screenshots
# (the SDs are computed first, so sd() still sees the original columns
# before object_x/object_z are overwritten by their means)
obj_locations <- ddply(OLM_7T_trial_results, c("targets", "objectName", "targetNames"),
                       summarise, object_x_sd = sd(object_x), object_x = mean(object_x),
                       object_z_sd = sd(object_z), object_z = mean(object_z))

Results

Modelling of spatial novelty during naturalistic navigation

Reporting demographic information

# Demographic summary values reported in the bullet list below
n       <- nrow(demographics)
# Formatted "M = ... (SD = ...)" age string (mean_SD_str2 from assortedRFunctions)
str1    <- mean_SD_str2(demographics$age, type = 1, digits = digits1, rounding_type = rounding_type, measure = "years")
# NOTE(review): positional indexing assumes the first gender level is female
# and the second male — confirm the coding of demographics$gender
females <- table(demographics$gender)[1]
males   <- table(demographics$gender)[2]
  • n = 56
  • Age = M = 24 years (SD = 2.8 years)
  • Gender ratio = 35/21
  • Age range 19.7, 37.3

Figure 1

3D histograms

# Build the hexagonal seed grid used by voronoi_tessellation_grid_binning_2d
#
# Recreates the seed coordinates of each sector (same code path as that
# function) so sector IDs can be mapped back to x/y positions.
#
# Args:
#   limValues Numeric vector (length 2): min/max of both axes.
#   numSeeds  Number of seed columns along the x-axis.
#
# Returns a data frame with columns x, y and sector (character row index).
get_seeds_from_voronoi_tessellation_hexagon <- function(limValues, numSeeds){
  xLim    <- sort(limValues)
  yLim    <- sort(limValues)
  byValue <- ((xLim[2] - xLim[1])/(numSeeds - 1))
  xRange  <- seq(from = xLim[1], to = xLim[2], by = byValue)
  xStepSize <- xRange[2] - xRange[1]
  x_shiftValue <- xStepSize/4
  
  # Rows are spaced by the hexagonal vertical step and centred within yLim
  yRange <- seq(from = yLim[1], to = yLim[2], by = xStepSize * (sqrt(3)/2))
  yRange <- yRange[1:length(xRange)]
  yRange <- yRange + (yLim[2] - max(yRange))/2
  
  # Alternate rows are shifted left/right by a quarter step to form hexagons.
  # The grid is built fully vectorised instead of growing the data frame with
  # rbind() inside a loop — identical result (column-major order, automatic
  # row names) without the O(n^2) copying.
  shiftIndex <- rep(c(1, -1), length.out = length(yRange))
  seeds <- data.frame(
    x = rep(xRange, each = length(yRange)) + rep(shiftIndex, times = length(xRange)) * x_shiftValue,
    y = rep(yRange, times = length(xRange))
  )
  seeds$sector <- row.names(seeds)
  return(seeds)
}

This creates the data necessary to create the 3D histogram using Blender.

# Load data from the event-table image
# NOTE(review): hard-coded absolute E:/ path only works on one machine; the
# same image is loaded via a relative path elsewhere in this script —
# consider the hostname-based path switching used at the top of the file.
load("E:/research_projects/OLM_project/analysis/ignore_eventTable3/images/SpaNov_event_file_contPM.RData")

# Convert list (one element per subject) to data frame
data <- rbindlist(subj_list, idcol = "subject")

# Subject to character
data$subject <- as.character(data$subject)

# Subset to run 1, 2, 3 to include the first retrieval run
data_sub2 <- data[data$run != 4, ]

# Calculate average/sum per subject: total visits and mean time since
# last visit for every sector
sector_average <- ddply(data_sub2, c("subject","sector"), summarise, 
                        visits = sum(visits, na.rm = TRUE),
                        lastVisit = mean(lastVisit, na.rm = TRUE))

# Calculate average per sector (across subjects)
sector_average <- ddply(sector_average, c("sector"), summarise, 
                        visits = mean(visits, na.rm = TRUE),
                        lastVisit = mean(lastVisit, na.rm = TRUE))

# Bin the environment
limValues   <- c(-90,90) # extent of the environment in both axes
numSeeds    <- 10 # Number of values per axis

# Create bins like it is done in voronoi_tessellation_grid_binning_2d
seeds <- get_seeds_from_voronoi_tessellation_hexagon(limValues, numSeeds)

# Add x & y coordinates to sector_average. For this just loop the df
sector_average$x <- NA
sector_average$y <- NA
for(i in 1:nrow(sector_average)){
  # Get current sector
  currentSector <- sector_average$sector[i]
  
  # Get corresponding row index in seeds
  rowID <- which(seeds$sector == currentSector)
  
  # Assign the correct coordinates based on rowID
  sector_average$x[i] <- seeds$x[rowID]
  sector_average$y[i] <- seeds$y[rowID]
}


# Prepare data for 3D plot for the average number of visits per person to each sector
# (drop column 3, lastVisit — keeps sector, visits, x, y)
temp_data <- sector_average[,-3]

# Write .csv file
write.csv(x = temp_data, file = "other_stuff/blenderData_avg_visits.csv", 
          quote = FALSE, row.names = FALSE)

# Prepare data for 3D plot for the average time since last visit to each sector
## Remove NA values & drop column 2 (visits) — keeps sector, lastVisit, x, y
temp_data <- na.omit(sector_average)
temp_data <- temp_data[,-2]

# Write .csv file
write.csv(x = temp_data, file = "other_stuff/blenderData_avg_time_since_last_visit.csv", 
          quote = FALSE, row.names = FALSE)

# Colours for the Blender bars
barColours <- viridisLite::viridis(n = 5, option = "D")

Range of values is 75 to 111321.

Total path length density plot

# Subset to only data from the encoding part
# NOTE(review): positionData is expected to come from a loaded .RData image — verify
pathData <- positionData[positionData$trialType == 'encoding', ]

# Calculate how much the perfect participant would need to travel
## Subset to only encoding trials
boolIndex       <- OLM_7T_trial_results$trialType == "encoding"
OLM_7T_encoding <- OLM_7T_trial_results[boolIndex, ]

# Total Euclidean path length of a 2D trajectory
#
# Args:
#   pos_x Numeric vector of x positions, ordered in time.
#   pos_z Numeric vector of z positions, ordered in time.
#
# Returns the summed point-to-point distance travelled (0 for a single point).
calculate_path_length <- function(pos_x, pos_z){
  # Distance between consecutive samples (Pythagoras on the coordinate deltas)
  step_dists <- sqrt(diff(pos_x)^2 + diff(pos_z)^2)
  
  # Total path length is the sum of all step distances
  sum(step_dists)
}

# Calculate the path traveled for each trial and each subject
# Use pathData because it only includes encoding data
# NOTE(review): "path_lenghts" is a typo for "path_lengths"; kept as-is
# because renaming would have to touch all downstream uses.
path_lenghts <- ddply(pathData, c("ppid", "trial"), 
                      summarise, 
                      distance = calculate_path_length(pos_x, pos_z))

# Calculate the total path lengths for the whole experiment
total_path_lengths <- ddply(path_lenghts, c("ppid"), 
                            summarise, distance = sum(distance))

# Get the range of the values to get the axis limits (no extra expansion)
axis_x <- calc_axis_limits(c(total_path_lengths$distance), 0)
  
# Create a density plot of total path length with the raw per-subject
# values jittered below the x-axis
total_path_lengths_plot <- ggplot(total_path_lengths, aes(x = distance)) + 
  geom_density(fill = baseColours[1], linewidth = 0.2) + 
  geom_jitter(aes(y = -0.001), height = 0.0005, 
              size = 0.1, width = 0, colour = boxplot_pointColour) +
  labs(y = "Density", x = "Total path length (vm)", title = "") +
  scale_x_continuous(breaks = axis_x$breaks) + 
  coord_cartesian(xlim = axis_x$limits, 
                  expand = FALSE, ylim = c(-0.001*2, 0.0065)) +
  base_theme +
  theme(axis.text.y = element_blank(), 
        axis.ticks.y = element_blank(),
        axis.line.y = element_blank(),
        plot.margin = unit(c(1, 2.5, 1, 2.5), "mm"))

Event durations

# Load the event files
event_files <- list.files(path = "ignore_eventTable3/OLMe_7T_SpaNov_gradient_6lvl/", recursive = TRUE, full.names = TRUE)

# Remove "per_tra.txt" and "dur_tra.txt" from list
event_files <- event_files[!str_detect(event_files, "tra.txt")]

# Pre-allocate the result list (one element per event file) instead of
# growing an empty list
event_list <- vector("list", length(event_files))

# Load the event files
for(i in seq_along(event_files)){
  # Load current event file
  temp_event_file        <- read.table(event_files[i], header = FALSE, sep = "\t")
  temp_event_file$V3     <- NULL # Remove the third column
  names(temp_event_file) <- c("onset", "duration")
  
  # Split the file path to get subject/run information from the folder structure
  event_file_path_split   <- str_split_1(event_files[i], pattern = "/")
  temp_event_file$subject <- event_file_path_split[3]
  temp_event_file$run     <- event_file_path_split[5]
  
  # To extract the level of the event extract "lvl" plus an integer from string
  temp_event_file$lvl     <- str_extract(event_file_path_split[7], "lvl[0-9]+")
  
  # Add to list
  event_list[[i]] <- temp_event_file
}

# Combine to data frame
event_df <- as.data.frame(rbindlist(event_list))

# Calculate offset
event_df$offset <- event_df$onset + event_df$duration

# Convert lvl label to number
event_df$lvl_num <- as.numeric(str_extract(event_df$lvl, "[0-9]+"))

# Change run label
# NOTE(review): assumes only "run-01" and "run-02" occur; any other label
# would silently become "Run 2" — revisit if more runs are added.
event_df$run <- ifelse(event_df$run == "run-01", "Run 1", "Run 2")


# Get the range of the values to get the axis limits
axis_x <- calc_axis_limits(event_df$duration, 0)


# Create a density plot of event durations with the raw values jittered
# below the x-axis. linewidth (not the deprecated size) is used for the
# density outline, consistent with the total path length plot.
event_duration_plot <- ggplot(event_df, aes(x = duration)) + 
  geom_density(fill = baseColours[1], linewidth = 0.2) + 
  geom_jitter(aes(y = -0.1), height = 0.05, 
              size = 0.1, width = 0, colour = boxplot_pointColour, alpha = 0.1) +
  labs(y = "Density", x = "Event duration (s)", title = "") +
  scale_x_continuous(breaks = axis_x$breaks) + 
  coord_cartesian(xlim = axis_x$limits, 
                  expand = FALSE, ylim = c(-0.1*2, 0.8)) +
  base_theme +
  theme(axis.text.y = element_blank(), 
        axis.ticks.y = element_blank(),
        axis.line.y = element_blank(),
        plot.margin = unit(c(1, 2.5, 1, 2.5), "mm"))

Histograms of heading angles

# Set seed
set.seed(20241123)

# SD if data was perfectly uniform (reference for the dashed line below)
perfectUni_sd <- sd(runif(9999999, min = 0, max = 359.9999))

# Calculate SD of the heading angle for each sector
sector_rotation <- ddply(no_control, c("sector"), summarise, 
                         rot_y_sd = sd(rot_y))

# Recreate the seed/sector coordinates of the hexagonal binning.
# Re-uses get_seeds_from_voronoi_tessellation_hexagon() (defined above)
# instead of duplicating its body inline; limValues and numSeeds are the
# same globals set in the 3D histogram section.
seeds <- get_seeds_from_voronoi_tessellation_hexagon(limValues, numSeeds)

# Add x & y coordinates to sector_rotation. For this just loop the df
sector_rotation$x <- NA
sector_rotation$y <- NA
for(i in seq_len(nrow(sector_rotation))){
  # Get current sector
  currentSector <- sector_rotation$sector[i]
  
  # Get corresponding row index in seeds
  rowID <- which(seeds$sector == currentSector)
  
  # Assign the correct coordinates based on rowID
  sector_rotation$x[i] <- seeds$x[rowID]
  sector_rotation$y[i] <- seeds$y[rowID]
}


# Get the range of the values to get the axis limits
axis_x <- calc_axis_limits(round(sector_rotation$rot_y_sd, 2), 0)
axis_y <- calc_axis_limits(c(0, 7.6), 0)

# Visualise SDs for all sectors: histogram of the per-sector heading-angle
# SDs with a dashed reference line at the SD of a perfectly uniform
# distribution. linewidth replaces the deprecated size aesthetic for lines.
heading_angle_plot <- ggplot(sector_rotation, aes(x = rot_y_sd)) +
  geom_histogram(colour = baseColours[1], fill = baseColours[1]) + 
  geom_vline(xintercept = perfectUni_sd, colour = "black", linetype = 2, linewidth = 0.5) +
  scale_x_continuous(breaks = axis_x$breaks) + 
  scale_y_continuous(breaks = round(axis_y$breaks)) + 
  coord_cartesian(xlim = axis_x$limits, 
                  ylim = round(axis_y$limits), 
                  expand = FALSE) +
  labs(x = "sd(heading angle)", y = "Count") +
  base_theme +
  theme(plot.margin = unit(c(1, 2.5, 1, 2.5), "mm"))

Box plot of the locomotion states

Calculate overall time spent on translation, rotation and being stationary:

# Rounding applied to the rotation values to avoid false positives
rotation_round  <- 2 # round rotation values to this decimal point

# Determine which locomotion state each time point belongs to:
# "translation" (Unity logged movement), "rotation" (heading changed
# between consecutive samples) or "stationary" (neither).
#
# Args:
#   rot_y   Numeric vector of heading angles, ordered in time.
#   moving2 Logical vector flagging time points where Unity logged movement.
#
# Returns a character vector the same length as rot_y.
# Note: reads rotation_round from the enclosing environment;
# angularDifference() comes from assortedRFunctions.
what_state <- function(rot_y, moving2){
  # Pair up consecutive time points (t vs t-1)
  angle1 <- rot_y[2:length(rot_y)]
  angle2 <- rot_y[1:(length(rot_y)-1)]
  
  # Calculate the amount rotated between the time points, then round;
  # the first sample has no predecessor, hence the leading NA
  rotated <- c(NA, round(angularDifference(angle1, angle2), rotation_round))
  
  # If rotation is zero call it stationary, otherwise rotation
  # (the NA first sample counts as stationary)
  tra_rot_sta <- ifelse(abs(rotated) == 0 | is.na(rotated), 
                        'stationary', 'rotation') 
  
  # Set time point to translation based on the information saved by Unity
  # (translation takes precedence over rotation)
  tra_rot_sta[moving2] <- 'translation'
  
  # Return
  return(tra_rot_sta)
}

# Convert moving to boolean (Unity logs the strings "True"/"False")
pathData$moving2 <- ifelse(pathData$moving == "True", TRUE, FALSE)

# Calculate the state for each time point for each subject and each trial
pathData <- ddply(pathData, c("ppid", "trial"), 
                  mutate, locomotion = what_state(rot_y, moving2))

# Calculate the proportion of time points per state for each subject
locomotion_per <- ddply(pathData, c("ppid"), summarise,
                        translation = mean(locomotion == "translation"),
                        rotation = mean(locomotion == "rotation"),
                        stationary = mean(locomotion == "stationary"))

# Convert from wide to long format (one row per subject x state)
locomotion_per_long  <- reshape2::melt(locomotion_per, id.vars = c("ppid"))

# Convert proportions to percentage
locomotion_per_long$value <- locomotion_per_long$value * 100

# Plot showing percentage of locomotion
## Create colours and axis limits
currentColours <- colorRampPalette(c(baseColours[1], baseColours[2]))(3)
axis_y <- calc_axis_limits(locomotion_per_long$value, axisExpand)

## Create the plot: per-subject lines and points with box plots overlaid,
## one box per locomotion state
locomotion_states_plot <- ggplot(locomotion_per_long, aes(x = variable, y = value, fill = variable)) +
  geom_line(aes(group = ppid), size = 0.05) +
  geom_point(size = 0.1, colour = boxplot_pointColour) +
  geom_boxplot(colour = boxplot_borderColour, 
               outlier.shape = NA, width = 0.4, 
               size = 0.2, alpha = 0.7) +
  scale_fill_manual(values = currentColours) +
  base_theme + 
  scale_x_discrete(guide = guide_axis(n.dodge = 2)) +
  scale_y_continuous(breaks = axis_y$breaks) + 
  coord_cartesian(xlim = c(0.5, 3.5), 
                  ylim = axis_y$limits, expand = FALSE) +
  theme(legend.position = "none", plot.margin = unit(c(1, 2, 1, 2), "mm")) +
  labs(title = "", x = "Locomotion", y = "Percentage")

Novelty score over time model plot

# Load the fitted brms models (provides m_noveltyScore_run1 / m_noveltyScore_run2)
load("fitted_brms_models/SpaNov_contPM_smo6_m_noveltyScore.Rdata")

# Background: https://www.andrewheiss.com/blog/2022/11/29/conditional-marginal-marginaleffects/
# Also: https://discourse.mc-stan.org/t/random-slopes-with-posterior-linpred/8004

# Get all subjects and build the prediction grids for both runs
subjects   <- unique(m_noveltyScore_run1$data$subject)
x1_range   <- c(-1, 2) # Extend range a bit to look better; range(m_noveltyScore_run1$data$s_onset_rel) = -0.9748797  1.8069553
x1_points  <- seq(from = x1_range[1], to = x1_range[2], length.out = 10)
x2_range   <- c(-1, 1.5) # Extend range a bit to look better; range(m_noveltyScore_run2$data$s_onset_rel) = -0.8343619  1.3502976
x2_points  <- seq(from = x2_range[1], to = x2_range[2], length.out = 10)
pred1_list <- list()
pred2_list <- list()

# Loop through all subjects and get subject-level predictions for both runs,
# including the subject-specific random slope for s_onset_rel
for(i in seq_along(subjects)){
  # Run 1
  ## Get prediction for this subject
  subj_pred <- predictions(m_noveltyScore_run1, 
                           newdata = datagrid(s_onset_rel = x1_points, subject = subjects[i]), 
                           by = "s_onset_rel", re_formula = ~ (s_onset_rel | subject))
  
  ## Convert to data frame, add subject and get minimum value for later colouring
  subj_pred         <- as.data.frame(subj_pred)
  subj_pred$subject <- subjects[i]
  subj_pred$min     <- min(subj_pred$estimate)
  
  ## Add to list 
  pred1_list[[i]] <- subj_pred
  
  # Run 2 (same steps as for Run 1)
  ## Get prediction for this subject
  subj_pred <- predictions(m_noveltyScore_run2, 
                           newdata = datagrid(s_onset_rel = x2_points, subject = subjects[i]), 
                           by = "s_onset_rel", re_formula = ~ (s_onset_rel | subject))
  
  ## Convert to data frame, add subject and get minimum value for later colouring
  subj_pred         <- as.data.frame(subj_pred)
  subj_pred$subject <- subjects[i]
  subj_pred$min     <- min(subj_pred$estimate)
  
  ## Add to list 
  pred2_list[[i]] <- subj_pred
}

# Convert lists to data frames
pred1_df <- as.data.frame(rbindlist(pred1_list))
pred2_df <- as.data.frame(rbindlist(pred2_list))

# Create plots: one line per subject, coloured by that subject's minimum
# predicted novelty score. linewidth replaces the deprecated size aesthetic
# for geom_line, matching its use elsewhere in this file.
## Run 1
axis_x <- calc_axis_limits(c(-1, 2), 0)
axis_y <- calc_axis_limits(c(-2, 0.6), 0)

p1 <- ggplot(data = pred1_df, aes(x = s_onset_rel, y = estimate, group = subject, colour = min)) +
  geom_line(linewidth = 0.05) +
  scale_color_viridis_c() + base_theme +
  theme(legend.position = "none",
        plot.margin = unit(c(1, 2, 1, 2), "mm")) +
  labs(title = "Run 1", x = "Time (scaled)", y = "Novelty score") +
  scale_x_continuous(breaks = axis_x$breaks) +
  scale_y_continuous(breaks = axis_y$breaks) +
  coord_cartesian(xlim = axis_x$limits, ylim = axis_y$limits,
                  expand = FALSE) 

## Run 2
axis_x <- calc_axis_limits(c(-1, 1.5), 0)
axis_y <- calc_axis_limits(c(-1.5, 0.5), 0)

p2 <- ggplot(data = pred2_df, aes(x = s_onset_rel, y = estimate, group = subject, colour = min)) +
  geom_line(linewidth = 0.05) +
  scale_color_viridis_c() + base_theme +
  theme(legend.position = "none",
        plot.margin = unit(c(1, 2, 1, 2), "mm")) +
  labs(title = "Run 2", x = "Time (scaled)", y = "Novelty score") +
  scale_x_continuous(breaks = axis_x$breaks) +
  scale_y_continuous(breaks = axis_y$breaks) +
  coord_cartesian(xlim = axis_x$limits, ylim = axis_y$limits,
                  expand = FALSE) 

# Combine both runs side by side
novelty_score_model_plot <- plot_grid(p1, p2, ncol = 2)

Combine to one figure

# Combine: top row holds the behavioural descriptives, bottom row the
# model-based novelty plot (NULL leaves the left half of that row empty)
top_row    <- plot_grid(total_path_lengths_plot, event_duration_plot,
                        heading_angle_plot, locomotion_states_plot,
                        align = "h", nrow = 1)
bottom_row <- plot_grid(NULL, novelty_score_model_plot)
Figure1_combined <- plot_grid(top_row, bottom_row, align = "v", nrow = 2)

# Save the combined figure
ggsave(Figure1_combined,
       filename = paste0(figurePath, "Figure_1_lower_part.png"), 
       dpi = 1000,
       width = 170,
       height = 85,
       units = "mm")

Descriptive statistics of exploration

Mean and SD of sd(heading angles)

# Make report string: M (SD) of the per-participant SD of heading angles
val  <- sector_rotation$rot_y_sd
str1 <- mean_SD_str2(val, report_type, digits1, rounding_type)
  • Spread of heading direction in SD: M = 92 (SD = 28)

Calculating the number of visits per participant

# Calculate the number of unique sectors each participant visited
num_visitedSectors <- ddply(no_control, c("ppid"), summarise, 
                            number = length_uniq(sector))

# Make report string: M (SD) across participants
val  <- num_visitedSectors$number
str1 <- mean_SD_str2(val, report_type, digits1, rounding_type)

Number of visited sectors per participant: M = 50 (SD = 2)

Describe two measures on which the novelty score is based

# Convert the per-subject list to one data frame; idcol stores the list
# index of each element as the "subject" column
data <- as.data.frame(rbindlist(subj_list, idcol = "subject"))

# Add subjects' R number (subjIDs_R is indexed by the subject number)
data$ppid <- subjIDs_R[data$subject]

# Function to match runStartTime
# Look up the start time of a given run for one participant (R number).
# ppid may arrive as a vector (as supplied by ddply); only its first
# element is used. Returns the first matching runStartTime.
find_runStartTime <- function(ppid, run){
  # Map the R number onto the anonymised key used in the trial data
  anonKey <- lookupTable$anonKey[lookupTable$Rnum == ppid[1]]
  
  # Use the anonymised key together with the block number to find this run
  matching <- OLM_7T_trial_results$subject == anonKey & 
              OLM_7T_trial_results$block_num == run
  starts   <- OLM_7T_trial_results$runStartTime[matching]
  
  return(starts[1])
}

# Use the function to add the correct run start time to every
# subject/run combination
data <- ddply(data, c("subject", "ppid", "run"), mutate, 
              runStartTime = find_runStartTime(ppid, run))

# Make time relative to the start of the run. The real onset times will be 
# slightly different but this will not matter for this. 
data$onset_rel <- data$onset - data$runStartTime

# Add run type: runs 2 and 4 are retrieval, all other runs are encoding
data$runType <- "encoding"
data$runType[data$run == 2 | data$run == 4] <- "retrieval"

# Subset to only encoding
data_sub <- data[data$runType == "encoding", ]

# Change run number to match the description in paper. Run 3 is Encoding Run 2
data_sub$run[data_sub$run == 3] <- 2

# Convert run to factor (for later modelling/plotting)
data_sub$f_run <- as.factor(data_sub$run)


# Calculate descriptive stats of the visit-based measures per subject
data_sub_agg1 <- ddply(data_sub, c("subject"), summarise,
                       max_visits = max(visits),
                       min_lastVisit = min(lastVisit, na.rm = TRUE),
                       max_lastVisit = max(lastVisit, na.rm = TRUE),
                       median_lastVisit = median(lastVisit, na.rm = TRUE),
                       mean_lastVisit = mean(lastVisit, na.rm = TRUE))

# Calculate the average duration of events, excluding rows that contain NA
# values (presumably events without a previous visit — verify upstream)
data_sub_agg2 <- ddply(na.omit(data_sub), c("subject"), summarise,
                       mean_duration = mean(duration),
                       median_duration = median(duration),
                       min_duration = min(duration),
                       max_duration = max(duration))

# Create strings for report (M and SD across subjects; "s" adds a seconds unit)
val  <- data_sub_agg1$max_visits
str1 <- mean_SD_str2(val, report_type, digits1, rounding_type)
val  <- data_sub_agg1$min_lastVisit
str2 <- mean_SD_str2(val, report_type, digits1, rounding_type, "s")
val  <- data_sub_agg1$max_lastVisit
str3 <- mean_SD_str2(val, report_type, digits1, rounding_type, "s")
val  <- data_sub_agg2$mean_duration
str4 <- mean_SD_str2(val, report_type, digits1, rounding_type, "s")
val  <- data_sub_agg2$min_duration
str5 <- mean_SD_str2(val, report_type, digits1, rounding_type, "s")
val  <- data_sub_agg2$max_duration
str6 <- mean_SD_str2(val, report_type, digits1, rounding_type, "s")
  • Maximum number of visits per sector for the participants was M = 18 (SD = 1.6) by the time of the second run.
  • The minimum time between visits varied between participants M = 6.7 s (SD = 2.3 s).
  • The maximum time between visits varied between participants M = 1300 s (SD = 310 s).
  • The average duration of the events varied between participants, M = 2.5 s (SD = 0.4 s), with an average minimum of M = 0.6 s (SD = 0.0023 s) and an average maximum of M = 12 s (SD = 5.3 s).

Locomotion during exploration

Report the average values for the three locomotion states

# Report M (SD) percentage of time spent in each locomotion state
# (locomotion_per holds proportions, hence the * 100)
val  <- locomotion_per$translation * 100
str1 <- mean_SD_str2(val, report_type, digits1, rounding_type, "%")
val  <- locomotion_per$rotation  * 100
str2 <- mean_SD_str2(val, report_type, digits1, rounding_type, "%")
val  <- locomotion_per$stationary * 100
str3 <- mean_SD_str2(val, report_type, digits1, rounding_type, "%")
  • Translation: M = 45 % (SD = 5.4 %)
  • Rotation: M = 19 % (SD = 3.8 %)
  • Stationary: M = 36 % (SD = 3.8 %)

Travelled path length

# Create the report string for the total path length ("vm" is the unit label
# used in the report text below)
str1 <- mean_SD_str2(total_path_lengths$distance, 
                     report_type, digits1, rounding_type, "vm")

# Create .csv for group-level analysis
write.csv(total_path_lengths, 
          file = paste0(path2data, "SpaNov_pathLength_designMatrix_data.csv"), 
          quote = FALSE, 
          row.names = FALSE)

The average total path length was M = 3200 vm (SD = 140 vm)

Event duration

# Make report string: M (SD) of event duration across events
val  <- event_df$duration
str1 <- mean_SD_str2(val, report_type, digits1, rounding_type)

Event duration: M = 2.5 (SD = 2.1)

Bayesian hierarchical modelling of novelty score

# Create report strings for the population-level (fixed-effect) slope of both
# models; row 2 of fixef() is the coefficient after the intercept — reported
# as the "novelty score slope" in the bullet points below
str1 <- brms_fixef_report(fixef(m_noveltyScore_run1)[2,])
str2 <- brms_fixef_report(fixef(m_noveltyScore_run2)[2,])
  • Novelty score slope Run 1 : = -0.48 (95 % CI [-0.56, -0.41])
  • Novelty score slope Run 2 : = -0.71 (95 % CI [-0.75, -0.66])

\[ \begin{align*} y & \sim \operatorname{Student}(\nu, \mu, \sigma) \\ \mu & = time + (time | subject) \end{align*} \]

Alternative way to describe: https://rpsychologist.com/r-guide-longitudinal-lme-lmer

Mapping spatial novelty across the hippocampal long axis

Load the results from the linear contrast (Level 1 > Level 2 > Level 3 > …). All xiis from this contrast start with GLM1.

  • Positive values: Higher activity for familiar sectors (c1)
  • Negative values: Higher activity for novel sectors (c2)
# Folder where the images are
ciftiFolder <- "/SpaNov/OLMe_7T_SpaNov_gradient_6lvl_smo4_MSMAll/cope7.feat/stats/vwc/"

# Minimum cluster size to be included in the cluster report
minClusterSize   <- 20

# Choose the files
zMap_file        <- paste0(path2imaging_results2, ciftiFolder, "results_lvl2cope1_dat_ztstat_c1.dscalar.nii")
pMap1_file       <- paste0(path2imaging_results2, ciftiFolder, "results_lvl2cope1_dat_ztstat_cfdrp_c1.dscalar.nii")
pMap2_file       <- paste0(path2imaging_results2, ciftiFolder, "results_lvl2cope1_dat_ztstat_cfdrp_c2.dscalar.nii")
clusterMap1_file <- paste0(path2imaging_results2, ciftiFolder, "results_lvl2cope1_dat_ztstat_cfdrp_c1_clusters.dscalar.nii")
clusterMap2_file <- paste0(path2imaging_results2, ciftiFolder, "results_lvl2cope1_dat_ztstat_cfdrp_c2_clusters.dscalar.nii")
betaMap_file     <- paste0(path2imaging_results2, ciftiFolder, "Y1.dtseries.nii")

# Helper: read a cifti file with all brain structures and the standard
# surfaces, so the identical call is not repeated for every map
read_xii_all <- function(ciftiFile){
  read_cifti(ciftiFile, brainstructures = "all", 
             surfL_fname = surfLeft, 
             surfR_fname = surfRight)
}

# Load all maps as xiis
GLM1_zMap_xii        <- read_xii_all(zMap_file)
GLM1_pMap1_xii       <- read_xii_all(pMap1_file)
GLM1_pMap2_xii       <- read_xii_all(pMap2_file)
GLM1_clusterMap1_xii <- read_xii_all(clusterMap1_file)
GLM1_clusterMap2_xii <- read_xii_all(clusterMap2_file)
GLM1_betaMap_xii     <- read_xii_all(betaMap_file)

# Create cluster tables based on the maps
GLM1_cluster1 <- cifti_cluster_report(zMap_file, 
                            clusterMap1_file, 
                            surfLeft, 
                            surfRight,
                            parcellationFile, 
                            minClusterSize,
                            FALSE)
GLM1_cluster2 <- cifti_cluster_report(zMap_file, 
                            clusterMap2_file, 
                            surfLeft, 
                            surfRight,
                            parcellationFile, 
                            minClusterSize,
                            FALSE)

# Helper: round the reported cluster statistics to `digits` significant
# digits (avoids repeating the same three assignments per report)
signif_cluster_values <- function(clusterReport, digits){
  for(col in c("zValue_max", "zValue_min", "cluster_mass")){
    clusterReport$cluster_values[[col]] <- signif(clusterReport$cluster_values[[col]], digits)
  }
  return(clusterReport)
}

# Round values in order to report them
GLM1_cluster1 <- signif_cluster_values(GLM1_cluster1, digits1)
GLM1_cluster2 <- signif_cluster_values(GLM1_cluster2, digits1)

# Write tab-separated files so the cluster tables are easier to edit.
# Helper combines the values with the region labels (4th column of
# cluster_labels) and writes the result to disk.
write_cluster_table <- function(clusterReport, fileName){
  # Combine to one table
  region_labels  <- clusterReport$cluster_labels[, 4]
  combined_table <- cbind(clusterReport$cluster_values, region_labels)
  
  # Add spaces between the region labels
  combined_table$region_labels <- str_replace_all(combined_table$region_labels, 
                                                  pattern = ",",
                                                  replacement = ", ")
  
  # Write as .txt file
  write.table(combined_table, file = fileName, 
              quote = FALSE,
              row.names = FALSE,
              sep = '\t')
}

## Positive clusters
write_cluster_table(GLM1_cluster1, "figures/SpaNov/tables/GLM1_c1.txt")

## Negative clusters
write_cluster_table(GLM1_cluster2, "figures/SpaNov/tables/GLM1_c2.txt")

Z-statistic over MNI

# Create subset and indices
## Get only the MNI coordinates from the hippocampus
HC_data_left      <- MNI_coord[str_starts(MNI_coord$region, pattern = "Hippocampus-L"), ]
HC_data_right     <- MNI_coord[str_starts(MNI_coord$region, pattern = "Hippocampus-R"), ]

## Get the indices for the left and the right hippocampus
index_bool_right  <- str_starts(GLM1_zMap_xii$meta$subcort$labels, pattern = "Hippocampus-R")
index_bool_left   <- str_starts(GLM1_zMap_xii$meta$subcort$labels, pattern = "Hippocampus-L")

## Split the projected HC into left and right
projected_HC_L <- projected_HC[projected_HC$region == "Hippocampus-L", ]
projected_HC_R <- projected_HC[projected_HC$region == "Hippocampus-R", ]

# Helper: attach the z-statistic and the projected long-axis position to the
# hippocampus coordinates of one hemisphere. Positions are matched on the
# y & z coordinates; unmatched voxels keep position = NA.
add_z_and_position <- function(HC_data, z_values, hemisphere, projected){
  df <- cbind(HC_data, data.frame(z_stat = z_values, Hemisphere = hemisphere))
  df$position <- NA
  
  # Transfer projected position values row by row (seq_len handles the
  # zero-row case safely)
  for(i in seq_len(nrow(projected))){
    matchRows <- df$y == projected$y[i] & df$z == projected$z[i]
    df[matchRows, "position"] <- projected$position[i]
  }
  
  return(df)
}

# Build the combined left + right hippocampus data frame
GLM1_zMap_xii_HC <- rbind(
  add_z_and_position(HC_data_left,
                     GLM1_zMap_xii$data$subcort[index_bool_left, ],
                     "left", projected_HC_L),
  add_z_and_position(HC_data_right,
                     GLM1_zMap_xii$data$subcort[index_bool_right, ],
                     "right", projected_HC_R))

# Calculate the average z-statistic for each y-coordinate per hemisphere
GLM1_zMap_xii_HC_agg <- ddply(GLM1_zMap_xii_HC, c("Hemisphere", "y"), summarise,
                              z_stat = mean(z_stat))

# Plot the average z-statistic along the MNI y axis, one line per hemisphere.
# linewidth replaces the deprecated size aesthetic for geom_line.
p1 <- ggplot(GLM1_zMap_xii_HC_agg, aes(x = y, y = z_stat, colour = Hemisphere)) + 
  geom_line(linewidth = 0.5) + 
  geom_hline(yintercept = 0, linetype = "dashed") +
  labs(x = "MNI y coordinate", y = "Average z-statistic") +
  scale_colour_manual(values = baseColours) +
  base_theme +
  theme(legend.position = "none")

.

Create unthresholded hippocampal z-map

For unthresholded visualisation of the hippocampal GLM results, we create a version of the z-map that sets everything other than the hippocampus to zero.

# (This chunk only needs to run once, so eval is set to FALSE)
# Extreme comparison: Level 1 vs. Level 6
ciftiFolder <- "/SpaNov/OLMe_7T_SpaNov_gradient_6lvl_smo4_MSMAll/cope14.feat/stats/vwc/"
zMap_file1  <- paste0(path2imaging_results2, ciftiFolder, "results_lvl2cope1_dat_ztstat_c1.dscalar.nii")
zMap_xii1   <- read_cifti(zMap_file1, brainstructures = "all", 
                  surfL_fname = surfLeft, surfR_fname = surfRight)

# Linear contrast from Level 1 to Level 6
ciftiFolder <- "/SpaNov/OLMe_7T_SpaNov_gradient_6lvl_smo4_MSMAll/cope7.feat/stats/vwc/"
zMap_file2  <- paste0(path2imaging_results2, ciftiFolder, "results_lvl2cope1_dat_ztstat_c1.dscalar.nii")
zMap_xii2   <- read_cifti(zMap_file2, brainstructures = "all", 
                  surfL_fname = surfLeft, surfR_fname = surfRight)

# Create mask for everything other than right and left hippocampus
currentMask <- zMap_xii1$meta$subcort$labels != "Hippocampus-R" & 
               zMap_xii1$meta$subcort$labels != "Hippocampus-L"

# Create new variables
zMap_xii1_HC <- zMap_xii1
zMap_xii2_HC <- zMap_xii2

# Set everything within this mask to zero
zMap_xii1_HC$data$subcort[currentMask, ] <- 0
zMap_xii2_HC$data$subcort[currentMask, ] <- 0

# Also set cortical values to zero. The vertex counts are taken from the
# loaded data instead of hard-coding the surface sizes.
zMap_xii1_HC$data$cortex_left  <- matrix(as.integer(0), nrow = nrow(zMap_xii1$data$cortex_left), ncol = 1)
zMap_xii1_HC$data$cortex_right <- matrix(as.integer(0), nrow = nrow(zMap_xii1$data$cortex_right), ncol = 1)
zMap_xii2_HC$data$cortex_left  <- matrix(as.integer(0), nrow = nrow(zMap_xii2$data$cortex_left), ncol = 1)
zMap_xii2_HC$data$cortex_right <- matrix(as.integer(0), nrow = nrow(zMap_xii2$data$cortex_right), ncol = 1)

# Create new names. fixed() makes the dots match literally instead of acting
# as regex wildcards.
new_zMap_file1 <- str_replace(zMap_file1, pattern = fixed(".dscalar.nii"), 
                              replacement = "_onlyHC.dscalar.nii")
new_zMap_file2 <- str_replace(zMap_file2, pattern = fixed(".dscalar.nii"), 
                              replacement = "_onlyHC.dscalar.nii")

# Write new cifti files
write_cifti(xifti = zMap_xii1_HC, cifti_fname = new_zMap_file1)
write_cifti(xifti = zMap_xii2_HC, cifti_fname = new_zMap_file2)

Spatial novelty-gradient in hippocampus

Figure 2

# Load hippocampal gradient data
load("intermediate_data/SpaNov_gradient_data.RData")

# Average across the MNI_y axis. NOTE: in plyr's summarise the arguments are
# evaluated in order, so n must be computed before the min/max columns are
# overwritten by their means.
HC_data_agg_pos <- ddply(HC_data, c("position", "Hemisphere"), summarise, n = length(min), min = mean(min), max = mean(max))

# Labels for the six novelty levels
conN       <- 6
novLabels  <- paste0('Level ', 1:conN)

# Create new data frame just for plotting
HC_data_plotting <- HC_data_agg_pos
HC_data_plotting$Hemisphere2 <- ifelse(HC_data_plotting$Hemisphere == "right",
                                       "Right hippocampus", "Left hippocampus")

# Create plots. linewidth replaces the deprecated size aesthetic for the
# regression line in geom_smooth.
scatter_min <- ggplot(HC_data_plotting, aes(x = -position, y = min)) + 
  facet_grid(~Hemisphere2, scales = "free_x") + 
  geom_point(aes(colour = min), size = 0.5) +
  geom_smooth(method = "lm", formula = y ~ x, linewidth = 0.5, colour = "black") +
  labs(x = "Distance to most anterior part (mm)", y = "Novelty Preference\n(min. z-stat)") + 
  scale_x_continuous(breaks = c(-45, 0), labels = c("45", "0")) +
  scale_y_continuous(breaks = 1:6, labels = novLabels, limits = c(1, 6)) +
  scale_colour_viridis_c(option = "H", limits = c(1, 6), direction = -1) +
  base_theme2 +
  theme(legend.position = "none")

scatter_max <- ggplot(HC_data_plotting, aes(x = -position, y = max)) + 
  facet_grid(~Hemisphere2, scales = "free_x") + 
  geom_point(aes(colour = max), size = 0.5) +
  geom_smooth(method = "lm", formula = y ~ x, linewidth = 0.5, colour = "black") +
  scale_x_continuous(breaks = c(-45, 0), labels = c("45", "0")) +
  labs(x = "Distance to most anterior part (mm)", y = "Novelty Preference\n(max. z-stat)") + 
  scale_y_continuous(breaks = 1:6, labels = novLabels, limits = c(1, 6)) +
  scale_colour_viridis_c(option = "H", limits = c(1, 6), direction = -1) +
  base_theme2 +
  theme(legend.position = "none")

# Combine figure and save
# Hemisphere-specific slopes
hemisphere_slopes <- plot_grid(scatter_min, scatter_max, ncol = 1)

# Dimensions 92 mm x 80 mm
ggsave(hemisphere_slopes,
       filename = paste0(figurePath, "Figure_2c_scatter_plot.png"),
       dpi = dpi,
       width = 92,
       height = 80,
       units = "mm")

# Average across the MNI_y axis and hemisphere (n is computed before min/max
# are overwritten within plyr's sequential summarise)
HC_data_agg_pos2 <- ddply(HC_data, c("position"), summarise, n = length(min), min = mean(min), max = mean(max))

# Create plots. linewidth replaces the deprecated size aesthetic for the
# regression line in geom_smooth.
scatter_min2 <- ggplot(HC_data_agg_pos2, aes(x = -position, y = min)) + 
  geom_point(aes(colour = min), size = 0.5) +
  geom_smooth(method = "lm", formula = y ~ x, linewidth = 0.5, colour = "black") +
  labs(x = "Distance to most anterior part (mm)", y = "Novelty Preference\n(min. z-stat)") + 
  scale_x_continuous(breaks = c(-45, 0), labels = c("45", "0")) +
  scale_y_continuous(breaks = 1:6, labels = novLabels, limits = c(1, 6)) +
  scale_colour_viridis_c(option = "H", limits = c(1, 6), direction = -1) +
  base_theme2 +
  theme(legend.position = "none")

scatter_max2 <- ggplot(HC_data_agg_pos2, aes(x = -position, y = max)) + 
  geom_point(aes(colour = max), size = 0.5) +
  geom_smooth(method = "lm", formula = y ~ x, linewidth = 0.5, colour = "black") +
  scale_x_continuous(breaks = c(-45, 0), labels = c("45", "0")) +
  labs(x = "Distance to most anterior part (mm)", y = "Novelty Preference\n(max. z-stat)") + 
  scale_y_continuous(breaks = 1:6, labels = novLabels, limits = c(1, 6)) +
  scale_colour_viridis_c(option = "H", limits = c(1, 6), direction = -1) +
  base_theme2 +
  theme(legend.position = "none")

# Combine figure and save
# Slopes averaged across hemispheres
slopes <- plot_grid(scatter_min2, scatter_max2, ncol = 1)

# Dimensions (92/1.5) mm x 80 mm
ggsave(slopes,
       filename = paste0(figurePath, "Figure_2c_scatter_plot2.png"),
       dpi = dpi,
       width = 92/1.5,
       height = 80,
       units = "mm")

Permutation analyses

# Permutation test for a linear model: the column `colName` is exposed as
# data$val, the model in lm_formula is fit once on the empirical data and
# nIter times on row-shuffled values of val.
#
# Args:
#   data       Data frame containing colName and all predictors in lm_formula.
#   lm_formula Model formula as a string, with "val" as the outcome.
#   nIter      Number of permutations.
#   colName    Name of the column to analyse (copied into data$val).
#   imageName  Optional; if supplied, results are saved to
#              intermediate_data/<imageName>.
#
# Returns: list with
#   $lm              Empirical lm fit.
#   $permuted_values Data frame of permuted coefficient estimates, one column
#                    per non-intercept coefficient, one row per iteration.
permutation_analysis <- function(data, lm_formula, nIter, colName, imageName){
  # Initialise results list
  results <- list()
  
  # Select correct column for analysis and create new data frame
  data$val     <- data[, colName]
  data2shuffle <- data
  
  # Calculate empirical values and save to list
  results$lm <- lm(lm_formula, data = data)
  numCoef    <- length(results$lm$coefficients) - 1 # Ignoring the intercept
  
  # Start cluster; keep at least 1 worker even on machines with few cores
  nWorkers   <- max(1, parallel::detectCores() - 2)
  my.cluster <- parallel::makeCluster(nWorkers, type = "PSOCK")
  
  # Make sure the workers are shut down even if the loop errors
  on.exit(parallel::stopCluster(cl = my.cluster), add = TRUE)
  
  # Register it to be used by %dopar%
  doParallel::registerDoParallel(cl = my.cluster)
  
  # Run parallel loop; each iteration returns a 1-row data frame with the
  # permuted non-intercept coefficient estimates
  permuted_values <- foreach(i = 1:nIter, .combine = 'c', .packages = 'plyr') %dopar% {
    data2shuffle$val <- sample(data$val)
    
    # Fit model on shuffled outcome
    temp_lm <- lm(lm_formula, data = data2shuffle)
    
    # Keep the estimates without the intercept, named as in the empirical fit
    temp_est <- as.data.frame(matrix(as.numeric(temp_lm$coefficients)[-1], ncol = numCoef))
    names(temp_est) <- names(results$lm$coefficients)[-1]
    list(temp_est)
  }
  
  # Add to results
  results$permuted_values <- as.data.frame(rbindlist(permuted_values))
  
  # Save to disk
  if(!missing(imageName)){
    save(results, file = paste0("intermediate_data/", imageName))
  }
  
  # Return value
  return(results)
}
Minimum
# Rename to be unique
grad_HC1 <- HC_data_agg_pos

# Check if the code needs to be run again: the md5 hash of the input data
# frame is compared against the hash stored in SpaNov_md5_hash_table.csv; if
# it differs, the permutation analysis is re-run. This avoids re-running the
# expensive permutations every time the document is knitted.
runCodeAgain1 <- check_if_md5_hash_changed(grad_HC1, hash_table_name = "SpaNov_md5_hash_table.csv")
Model with interaction with hemisphere
# Seed for reproducibility
set.seed(19911225)

# Analysis parameters
lm_formula <- "val ~ position * Hemisphere"
nIter      <- 100000
colName    <- "min"
imageName  <- "SpaNov_permut_HC_grad_analysis1.RData"

# Run the permutation analysis only if the input data changed; otherwise load
# the cached result from disk
if(runCodeAgain1){
  grad_min_permut1 <- permutation_analysis(grad_HC1, lm_formula, 
                                           nIter, colName, imageName)
} else {
  load(paste0("intermediate_data/", imageName))
  grad_min_permut1 <- results
}

# Null distribution (3rd permuted coefficient) and the empirical
# position-by-hemisphere interaction estimate (4th coefficient)
dist    <- grad_min_permut1$permuted_values[, 3]
critVal <- grad_min_permut1$lm$coefficients[4]
grad_min_permut1_p2 <- pValue_from_nullDist(critVal, dist, "two.sided")

# Axis limits for the histogram
axis_x <- calc_axis_limits(dist, axisExpand, digits = digits1, "round")

# Histogram of the null distribution; the empirical value is the dashed line
interaction_min <- ggplot(data.frame(x = dist), aes(x = x)) + 
  geom_histogram(bins = 100, colour = baseColours[1], fill = baseColours[1]) + 
  geom_vline(xintercept = critVal, colour = "#ff3300", linetype = 2, linewidth = 0.5) +
  labs(title = "Interaction (min.)", x = "Parameter estimate", y = "Count") +
  scale_x_continuous(breaks = axis_x$breaks) + 
  coord_cartesian(xlim = axis_x$limits, 
                  expand = FALSE) +
  base_theme +
  theme(plot.margin = unit(c(0, 2.5, 0, 0), "mm"))
Average across hemispheres
# Rename to be unique
grad_HC2 <- HC_data_agg_pos2

# Check if the code needs to be run again: the md5 hash of the input data
# frame is compared against the hash stored in SpaNov_md5_hash_table.csv; if
# it differs, the permutation analysis is re-run. This avoids re-running the
# expensive permutations every time the document is knitted.
runCodeAgain2 <- check_if_md5_hash_changed(grad_HC2, hash_table_name = "SpaNov_md5_hash_table.csv")
# Seed for reproducibility
set.seed(20250319)

# Analysis parameters
lm_formula <- "val ~ position"
nIter      <- 100000
colName    <- "min"
imageName  <- "SpaNov_permut_HC_grad_analysis2.RData"

# Run the permutation analysis only if the input data changed; otherwise load
# the cached result from disk
if(runCodeAgain2){
  grad_min_permut1_avg <- permutation_analysis(grad_HC2, lm_formula, 
                                               nIter, colName, imageName)
} else {
  load(paste0("intermediate_data/", imageName))
  grad_min_permut1_avg <- results
}

# Null distribution (1st permuted coefficient) and the empirical position
# slope (2nd coefficient, after the intercept)
dist    <- grad_min_permut1_avg$permuted_values[, 1]
critVal <- grad_min_permut1_avg$lm$coefficients[2]
grad_min_permut1_avg_p1 <- pValue_from_nullDist(critVal, dist, "two.sided")

# Histogram of the null distribution; the empirical value is the dashed line
p1 <- ggplot(data.frame(x = dist), aes(x = x)) +
  geom_histogram(bins = 100, colour = baseColours[1], fill = baseColours[1]) +
  geom_vline(xintercept = critVal, colour = "#ff3300", linetype = 2) +
  labs(title = "Effect of position", x = "Parameter estimate", y = "Count") +
  theme_classic()
Maximum
Model with interaction with hemisphere
# Seed for reproducibility
set.seed(19911225)

# Analysis parameters
lm_formula <- "val ~ position * Hemisphere"
nIter      <- 100000
colName    <- "max"
imageName  <- "SpaNov_permut_HC_grad_analysis3.RData"

# Run the permutation analysis only if the input data changed; otherwise load
# the cached result from disk
if(runCodeAgain1){
  grad_max_permut1 <- permutation_analysis(grad_HC1, lm_formula, 
                                           nIter, colName, imageName)
} else {
  load(paste0("intermediate_data/", imageName))
  grad_max_permut1 <- results
}

# Null distribution (3rd permuted coefficient) and the empirical
# position-by-hemisphere interaction estimate (4th coefficient)
dist    <- grad_max_permut1$permuted_values[, 3]
critVal <- grad_max_permut1$lm$coefficients[4]
grad_max_permut1_p2 <- pValue_from_nullDist(critVal, dist, "two.sided")

# Axis limits for the histogram
axis_x <- calc_axis_limits(dist, axisExpand, digits = digits1, "round")

# Histogram of the null distribution; the empirical value is the dashed line
interaction_max <- ggplot(data.frame(x = dist), aes(x = x)) + 
  geom_histogram(bins = 100, colour = baseColours[1], fill = baseColours[1]) + 
  geom_vline(xintercept = critVal, colour = "#ff3300", linetype = 2, linewidth = 0.5) +
  labs(title = "Interaction (max.)", x = "Parameter estimate", y = "Count") +
  scale_x_continuous(breaks = axis_x$breaks) + 
  coord_cartesian(xlim = axis_x$limits, expand = FALSE) +
  base_theme +
  theme(plot.margin = unit(c(0, 2.5, 0, 2.5), "mm"))
Average across hemispheres
# Seed for reproducibility
set.seed(20131)

# Analysis parameters
lm_formula <- "val ~ position"
nIter      <- 100000
colName    <- "max"
imageName  <- "SpaNov_permut_HC_grad_analysis4.RData"

# Run the permutation analysis only if the input data changed; otherwise load
# the cached result from disk
if(runCodeAgain2){
  grad_max_permut1_avg <- permutation_analysis(grad_HC2, lm_formula, 
                                               nIter, colName, imageName)
} else {
  load(paste0("intermediate_data/", imageName))
  grad_max_permut1_avg <- results
}

# Null distribution (1st permuted coefficient) and the empirical position
# slope (2nd coefficient, after the intercept)
dist    <- grad_max_permut1_avg$permuted_values[, 1]
critVal <- grad_max_permut1_avg$lm$coefficients[2]
grad_max_permut1_avg_p1 <- pValue_from_nullDist(critVal, dist, "two.sided")

# Histogram of the null distribution; the empirical value is the dashed line
p2 <- ggplot(data.frame(x = dist), aes(x = x)) +
  geom_histogram(bins = 100, colour = baseColours[1], fill = baseColours[1]) +
  geom_vline(xintercept = critVal, colour = "#ff3300", linetype = 2) +
  labs(title = "Effect of position", x = "Parameter estimate", y = "Count") +
  theme_classic()

Report stat of permutation analysis

summary(grad_min_permut1_avg$lm)
## 
## Call:
## lm(formula = lm_formula, data = data)
## 
## Residuals:
##      Min       1Q   Median       3Q      Max 
## -1.85145 -0.59263  0.04524  0.42017  2.16146 
## 
## Coefficients:
##              Estimate Std. Error t value Pr(>|t|)    
## (Intercept)  4.290691   0.101292  42.360  < 2e-16 ***
## position    -0.021074   0.004271  -4.935 1.67e-06 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 0.8017 on 203 degrees of freedom
## Multiple R-squared:  0.1071, Adjusted R-squared:  0.1027 
## F-statistic: 24.35 on 1 and 203 DF,  p-value: 1.671e-06
eff_min <- eta_squared(car::Anova(grad_min_permut1_avg$lm, type = 2))
kable(eff_min)
Parameter Eta2 CI CI_low CI_high
position 0.1071034 0.95 0.0490338 1
summary(grad_max_permut1_avg$lm)
## 
## Call:
## lm(formula = lm_formula, data = data)
## 
## Residuals:
##     Min      1Q  Median      3Q     Max 
## -3.1959 -0.4426  0.0021  0.5948  1.8369 
## 
## Coefficients:
##              Estimate Std. Error t value Pr(>|t|)    
## (Intercept)  4.508839   0.106573  42.308  < 2e-16 ***
## position    -0.026762   0.004493  -5.956 1.12e-08 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 0.8435 on 203 degrees of freedom
## Multiple R-squared:  0.1488, Adjusted R-squared:  0.1446 
## F-statistic: 35.47 on 1 and 203 DF,  p-value: 1.123e-08
eff_max <- eta_squared(car::Anova(grad_max_permut1_avg$lm, type = 2))
kable(eff_max)
Parameter Eta2 CI CI_low CI_high
position 0.148756 0.95 0.081141 1
  • Minimum: interaction p = .082, slope (avg. across hemispheres) p < .001
  • Maximum: interaction p = .092, slope (avg. across hemispheres) p < .001

Extension of spatial novelty gradient to the posterior medial cortex

Figure 3

Yeo 7 parcellation ridge plot

# Get the names of the networks (-1 removes the first row, which only holds
# the ??? placeholder label)
Yeo7_labels        <- Yeo7_xii$meta$cifti$labels$parcels[-1, ]
Yeo7_labels$Region <- row.names(Yeo7_labels)
row.names(Yeo7_labels)   <- NULL

# Convert the RGB values to hexcodes
Yeo7_labels$hexcol <- rgb(Yeo7_labels$Red, Yeo7_labels$Green, Yeo7_labels$Blue, 
                          maxColorValue = 1)

# Create basic data frame: one row per cortical vertex with its z value and
# its Yeo 7 network key
Yeo_zValues_df <- data.frame(zValue = c(GLM1_zMap_xii$data$cortex_left, GLM1_zMap_xii$data$cortex_right),
                             Yeo7   = c(Yeo7_xii$data$cortex_left, Yeo7_xii$data$cortex_right))

# Add network label by matching the Yeo7 key (vectorised; replaces a per-row
# loop — unmatched keys stay NA)
Yeo_zValues_df$net_label <- Yeo7_labels$Region[match(Yeo_zValues_df$Yeo7, Yeo7_labels$Key)]

# Find lowest z value that's significant
## Get the p-values and the z-values
GLM1_pValues1 <- get_all_points_from_xifti(GLM1_pMap1_xii)
GLM1_pValues2 <- get_all_points_from_xifti(GLM1_pMap2_xii)
GLM1_zValues  <- get_all_points_from_xifti(GLM1_zMap_xii)

## Subset to only significant values. NOTE(review): the cfdrp maps are
## compared with > cutOff, which assumes they encode 1 - p rather than p —
## verify against the map definition.
GLM1_zValues1 <- GLM1_zValues[GLM1_pValues1 > cutOff]
GLM1_zValues2 <- GLM1_zValues[GLM1_pValues2 > cutOff]

# Exclude vertices that are not included in Yeo 7 (key 0)
Yeo_zValues_df_sub <- Yeo_zValues_df[Yeo_zValues_df$Yeo7 != 0, ]

# Order according to the mean z-statistic of the network
Yeo_zValues_df_agg <- ddply(Yeo_zValues_df_sub, c("net_label"), summarise, 
                            avg_z = mean(zValue))

## First order alphabetically so we can apply the same order to Yeo7_labels
Yeo_zValues_df_agg <- Yeo_zValues_df_agg[order(as.character(Yeo_zValues_df_agg$net_label)), ]

## Now order based on the mean
mean_order         <- order(Yeo_zValues_df_agg$avg_z)
Yeo_zValues_df_agg <- Yeo_zValues_df_agg[mean_order, ]

# Order the colours in the same way (alphabetically first, then by mean)
Yeo7_labels <- Yeo7_labels[order(as.character(Yeo7_labels$Region)), ]
Yeo7_labels <- Yeo7_labels[mean_order, ]

# Add the full names
Yeo7_labels$fullNames <- find_values_thru_matching(Yeo7_abbr, Yeo7_fullNames, Yeo7_labels$Region)

# Make net_label a factor with the correct order
Yeo_zValues_df_sub$net_label <- factor(Yeo_zValues_df_sub$net_label, 
                                       levels = Yeo_zValues_df_agg$net_label,
                                       labels = Yeo7_labels$fullNames,
                                       ordered = TRUE)

# Ridge plot of z-statistics per network; the dashed vertical lines mark the
# extreme z values of the thresholded maps in both directions
p1 <- ggplot(Yeo_zValues_df_sub, aes(x = zValue, y = net_label, fill = net_label)) + 
  geom_density_ridges(linewidth = 0.3) +
  scale_fill_manual(values = Yeo7_labels$hexcol) +
  geom_vline(xintercept = max(GLM1_zValues2), linetype = 2, linewidth = 0.3) +
  geom_vline(xintercept = min(GLM1_zValues1), linetype = 2, linewidth = 0.3) +
  base_theme + 
  coord_cartesian(ylim = c(0.5, 8.5)) +
  theme(legend.position = "none", plot.margin = unit(c(1,1,1,1), "mm")) +
  labs(x = "z-statistic", y = "")

ggsave(p1,
       filename = paste0(figurePath, "Fig_3a_Yeo7_ridge.png"), 
       dpi = dpi,
       width = 60,
       height = 30,
       units = "mm")

Margulies’ connectivity gradients

# Load all gradients from brainstat
# Source: https://brainstat.readthedocs.io/en/master/index.html
nGrad          <- 10
prefix         <- "data/ignore_fMRI_version1/brainstat/Gradient_"

# Get the medial-wall masks from a donor xifti (Yeo 7) so all gradients share
# the same vertex space
mwm_L <- Yeo7_xii$meta$cortex$medial_wall_mask$left
mwm_R <- Yeo7_xii$meta$cortex$medial_wall_mask$right

# Pre-allocate data frame: one row per cortical vertex, one column per gradient
# (previously ncol and the column names hard-coded 10 instead of using nGrad)
Margulies_grad        <- as.data.frame(matrix(NA, nrow = 59412, ncol = nGrad))
names(Margulies_grad) <- paste0("Grad_", seq_len(nGrad))

# Loop through all gradients
for(i in seq_len(nGrad)){
  # Create a new xifti by reading the gifti files
  tmp_xii <- read_xifti2(cortexL = paste0(prefix, i, "_L.shape.gii"), 
                         surfL = surfLeft,
                         mwall_values = c(0),
                         cortexR = paste0(prefix, i, "_R.shape.gii"),
                         surfR = surfRight)
  
  # Apply the donor medial-wall mask to the loaded file so the vertex count
  # matches the other xiftis used in this analysis
  tmp_xii$data$cortex_left[!mwm_L]  <- NA
  tmp_xii$data$cortex_right[!mwm_R] <- NA
  tmp_xii <- move_to_mwall(tmp_xii, values = c(NA))
  
  # Add to prepared data frame
  Margulies_grad[, i] <- c(tmp_xii$data$cortex_left, tmp_xii$data$cortex_right)
}

# Add Yeo 7 network labels to the gradient data frame
# (both data frames are c(left, right) vertex concatenations, so rows align)
Margulies_grad$Yeo7_name <- Yeo_zValues_df$net_label

# Keep only vertices that belong to a Yeo 7 network
Margulies_grad_sub <- Margulies_grad[!is.na(Margulies_grad$Yeo7_name), ]

# Scatter plot of all networks in Margulies gradient space (Grad 2 vs Grad 1)
p1 <- ggplot(Margulies_grad_sub, aes(x = Grad_2, y = Grad_1, colour = Yeo7_name)) + 
  geom_point(alpha = 0.2, size = 0.3, shape = 16) +
  scale_colour_manual(values = Yeo7_colours) +
  theme_void() + 
  theme(legend.position = "none")

# Save file
ggsave(p1,
       filename = paste0(figurePath, "Fig_3a_Yeo7_Margulies_space.png"), 
       dpi = dpi,
       width = 30,
       height = 30,
       units = "mm")

# Label vertices that are significant for GLM 1 in either direction
Margulies_grad$GLM_1_direction <- NA
sig_fam <- c(GLM1_pMap1_xii$data$cortex_left, GLM1_pMap1_xii$data$cortex_right) > cutOff
sig_nov <- c(GLM1_pMap2_xii$data$cortex_left, GLM1_pMap2_xii$data$cortex_right) > cutOff
Margulies_grad$GLM_1_direction[sig_fam] <- "Familiarity"
Margulies_grad$GLM_1_direction[sig_nov] <- "Novelty"

# Create subset of only significant vertices
Margulies_grad_sub <- Margulies_grad[!is.na(Margulies_grad$GLM_1_direction), ]

# Plot the significant vertices in gradient space
p1 <- ggplot(Margulies_grad_sub, aes(x = Grad_2, y = Grad_1, colour = GLM_1_direction)) + 
  geom_point(alpha = 0.2, size = 0.3, shape = 16) +
  scale_colour_manual(values = cool_warm_colours) + 
  theme_void() + 
  theme(legend.position = "none") 

# Save file
ggsave(p1,
       filename = paste0(figurePath, "Fig_3a_NovFam_Margulies_space.png"), 
       dpi = dpi,
       width = 70,
       height = 70,
       units = "mm")
# Splitting the dataset into the Training set and Test set 
# https://www.geeksforgeeks.org/classifying-data-using-support-vector-machinessvms-in-r/
# Features: the first two Margulies gradients; label: 1 = Novelty, 0 = Familiarity
svm_df <- data.frame(Grad_1     = Margulies_grad_sub$Grad_1,
                     Grad_2     = Margulies_grad_sub$Grad_2,
                     Modulation = ifelse(Margulies_grad_sub$GLM_1_direction == "Novelty", 1, 0))

# Seed for this analysis (reproducible train/test split)
set.seed(123) 

# Split the data into training (75%) and test (25%) sets
split        <- sample.split(svm_df$Modulation, SplitRatio = 0.75) 
training_set <- svm_df[split, ] 
test_set     <- svm_df[!split, ] 

# Feature scaling (z-score the gradient columns; the label stays untouched)
feature_cols <- c("Grad_1", "Grad_2")
training_set[feature_cols] <- scale(training_set[feature_cols]) 
test_set[feature_cols]     <- scale(test_set[feature_cols]) 

# Fitting an RBF-kernel SVM to the training set 
classifier <- svm(formula = Modulation ~ ., 
                  data = training_set, 
                  type = 'C-classification', 
                  kernel = 'radial') 

# Predicting the test set results; cm holds the confusion matrix in percent
y_pred   <- predict(classifier, newdata = test_set[feature_cols]) 
cm       <- table(test_set[, 3], y_pred)/sum(table(test_set[, 3], y_pred))
cm       <- round(cm *100, 2)
accuracy <- mean(test_set[, 3] == y_pred)

The SVM-classification accuracy is 95.29% for significant novelty vs. familiarity vertices in Margulies’ et al. (2015) gradient space.

Resting-state connectivity between spatial novelty gradients in hippocampus and the posterior parietal cortex

Figure 4

# Load FRSC data (creates HC_2_cortex_SpaNov_gradient)
load("data/ignore_fMRI_version1/grad_FRSC/HC_2_cortex_FRSC_SpaNov_gradient.RData")

# Colour values for the diagonals
diagonal_colours <- c("#595959", "#f2f2f2")

# Convert list to data frame
FRSC_data <- rbindlist(HC_2_cortex_SpaNov_gradient)
FRSC_data <- as.data.frame(FRSC_data)

# Create column coding whether a pair is on the diagonal, i.e. whether the
# cortical gradient level matches the hippocampal gradient level
# (the previous version contained this exact assignment twice; the duplicate
# has been removed)
FRSC_data$diagonal <- ifelse(FRSC_data$gradient_level_cortex == FRSC_data$gradient_level_HC, "diagonal", "off-diagonal")

# Create average heatmap
## Calculate the average connectivity for the matrix
FRSC_data_heatmap <- ddply(FRSC_data, c("hemisphere_cortex", "hemisphere_HC",
                                        "gradient_level_cortex", "gradient_level_HC"),
                          summarise, connectivity = mean(connectivity))

# Convert "left" and "right" to L & R
FRSC_data_heatmap$hemisphere_HC     <- ifelse(FRSC_data_heatmap$hemisphere_HC == "left", "L", "R")
FRSC_data_heatmap$hemisphere_cortex <- ifelse(FRSC_data_heatmap$hemisphere_cortex == "left", "L", "R")

# Create better labels for the facets
FRSC_data_heatmap$hemisphere_HC     <- paste(FRSC_data_heatmap$hemisphere_HC, "HC")
FRSC_data_heatmap$hemisphere_cortex <- paste(FRSC_data_heatmap$hemisphere_cortex, "PMC")

# Diagonal column for the averaged heatmap data as well
FRSC_data_heatmap$diagonal <- ifelse(FRSC_data_heatmap$gradient_level_cortex == FRSC_data_heatmap$gradient_level_HC, "diagonal", "off-diagonal")

# Create heatmap of the average connectivity per gradient-level pair
heatmap_data <- ggplot(FRSC_data_heatmap, aes(x = gradient_level_cortex, y = gradient_level_HC, fill = connectivity)) +
  facet_grid(hemisphere_HC ~ hemisphere_cortex) +
  geom_tile() +
  scale_fill_viridis_c(breaks = c(0.010, 0.015, 0.020)) +
  scale_x_continuous(breaks = 1:6) +
  #scale_y_continuous(breaks = 1:6) +
  coord_equal(expand = FALSE) +
  base_theme +
  theme(legend.position = "none",
        axis.text.y = element_blank(),
        axis.ticks.y = element_blank()) +
  labs(x = "Gradient in PMC", y = "Gradient in HC",
       fill = "z(r)", title = "Resting-state data")

# Create illustration for the diagonal (the theoretical prediction that matched
# gradient levels are more strongly connected)
# Fixed typo in the displayed title: "Theorectial" -> "Theoretical"
heatmap_illustration <- ggplot(FRSC_data_heatmap, aes(x = gradient_level_cortex, y = gradient_level_HC, fill = diagonal)) +
  facet_grid(hemisphere_HC ~ hemisphere_cortex) +
  geom_tile() +
  scale_fill_manual(values = diagonal_colours) +
  scale_x_continuous(breaks = 1:6) +
  #scale_y_continuous(breaks = 1:6) +
  coord_equal(expand = FALSE) +
  base_theme +
  theme(legend.position = "none",
        axis.text.y = element_blank(),
        axis.ticks.y = element_blank()) +
  labs(x = "Gradient in PMC", y = "Gradient in HC",
       fill = "Diagonal", title = "Theoretical prediction")

# Calculate average connectivity of the diagonals per subject and hemisphere pair
FRSC_data_diagonal <- ddply(FRSC_data,  c("subject", "hemisphere_cortex", "hemisphere_HC", "diagonal"),
                            summarise, conn = mean(connectivity, na.rm = TRUE))

# Correct for baseline difference to better visualise within-subject differences (i.e. removing the between-subject variability)
# More information here: https://www.cogsci.nl/blog/tutorials/156-an-easy-way-to-create-graphs-with-within-subject-error-bars
## Calculate grand-average across participants but for each hemisphere combination separately
FRSC_data_diagonal <- ddply(FRSC_data_diagonal,c("hemisphere_cortex", "hemisphere_HC"), 
                           mutate, grand_avg_conn = mean(conn, na.rm = TRUE))
## Calculate subject-average for each hemisphere combination separately, i.e.
## averaging the diagonal and off-diagonal values per subject
FRSC_data_diagonal <- ddply(FRSC_data_diagonal,c("subject", "hemisphere_cortex", "hemisphere_HC"), 
                           mutate, subj_avg_conn = mean(conn, na.rm = TRUE))

## Baseline correct (subtract subject mean, add back the grand mean)
FRSC_data_diagonal$corrected_conn <- FRSC_data_diagonal$conn - FRSC_data_diagonal$subj_avg_conn + FRSC_data_diagonal$grand_avg_conn

# Calculate the difference between diagonal and off-diagonal
# (index explicitly by the diagonal label instead of relying on the positional
# row order conn[1]/conn[2] within each group, which was fragile)
FRSC_data_diagonal <- ddply(FRSC_data_diagonal, c("subject", "hemisphere_cortex", "hemisphere_HC"),
                            mutate,
                            diff           = conn[diagonal == "diagonal"] - conn[diagonal == "off-diagonal"],
                            corrected_diff = corrected_conn[diagonal == "diagonal"] - corrected_conn[diagonal == "off-diagonal"])

# Importantly the correction applied here leads to the same difference values

# Create figure
# Helper: rain-cloud style panel for one HC x PMC hemisphere combination,
# showing per-subject diagonal vs off-diagonal (baseline-corrected) connectivity.
# Replaces four near-identical copy-pasted plot definitions.
plot_diagonal_panel <- function(df, hc_hemi, cortex_hemi, panel_title) {
  # Subset to this hemisphere combination and split by diagonal status
  data_sub  <- df[df$hemisphere_HC == hc_hemi & df$hemisphere_cortex == cortex_hemi, ]
  data_sub1 <- data_sub[data_sub$diagonal == "diagonal", ]
  data_sub2 <- data_sub[data_sub$diagonal == "off-diagonal", ]

  ggplot(data_sub, aes(x = diagonal, y = corrected_conn)) +
    geom_hline(yintercept = unique(data_sub$grand_avg_conn), linetype = "dashed", linewidth = 0.3) +
    geom_point(aes(colour = diff), size = 0.5) +
    # linewidth instead of the deprecated size for line width (consistent with
    # the geom_hline above and the rest of the file)
    geom_line(aes(group = subject, colour = diff), linewidth = 0.5) +
    geom_half_violin(data = data_sub1, mapping = aes(x = diagonal, y = corrected_conn), 
                     side = "l", fill = diagonal_colours[1], nudge = 0.15, size = 0.1, width = 0.5) +
    geom_half_boxplot(data = data_sub1, mapping = aes(x = diagonal, y = corrected_conn), 
                      side = "l", fill = diagonal_colours[1], nudge = 0.12, width = 0.1, outlier.shape = NA, size = 0.1) +
    geom_half_violin(data = data_sub2, mapping = aes(x = diagonal, y = corrected_conn), 
                     side = "r", fill = diagonal_colours[2], nudge = 0.15, size = 0.1, width = 0.5) +
    geom_half_boxplot(data = data_sub2, mapping = aes(x = diagonal, y = corrected_conn), 
                      side = "r", fill = diagonal_colours[2], nudge = 0.12, width = 0.1, outlier.shape = NA, size = 0.1) +
    scale_colour_viridis_c() +
    scale_fill_manual(values = diagonal_colours) +
    base_theme +
    theme(legend.position = "none") +
    labs(x = "", y = "Connectivity", title = panel_title)
}

# One panel per hemisphere combination
p1 <- plot_diagonal_panel(FRSC_data_diagonal, "left",  "left",  "L-HC x L-PMC")
p2 <- plot_diagonal_panel(FRSC_data_diagonal, "left",  "right", "L-HC x R-PMC")
p3 <- plot_diagonal_panel(FRSC_data_diagonal, "right", "left",  "R-HC x L-PMC")
p4 <- plot_diagonal_panel(FRSC_data_diagonal, "right", "right", "R-HC x R-PMC")

# Add plots together
diagonal_plots  <- plot_grid(p1, p2, p3, p4, ncol = 2)
combined_figure <- plot_grid(plot_grid(heatmap_illustration, heatmap_data, ncol = 1), 
                             diagonal_plots, 
                             nrow = 1, rel_widths = c(1, 1.7))

# Save
# NOTE(review): file name says "FRRC" while the data objects are named "FRSC" -
# confirm whether this is intentional (kept unchanged to avoid breaking links)
ggsave(combined_figure,
       filename = paste0(figurePath, "Fig_4_FRRC.png"), 
       dpi = dpi,
       width = 180,
       height = 80,
       units = "mm")

# Calculate stats
# Reduce to one difference score per subject and hemisphere pairing; diff and
# corrected_diff are constant within each group, so the first entry is kept
FRSC_data_diagonal_diff <- ddply(FRSC_data_diagonal,
                                 c("subject", "hemisphere_cortex", "hemisphere_HC"),
                                 summarise,
                                 diff           = diff[1],
                                 corrected_diff = corrected_diff[1])

# Frequentist p-value, Bayes factor (BF10) and Cohen's d per hemisphere pairing
FRSC_data_diagonal_diff_stats <- ddply(FRSC_data_diagonal_diff,
                                       c("hemisphere_cortex", "hemisphere_HC"),
                                       summarise,
                                       p    = t.test(diff)$p.value,
                                       BF10 = signif(reportBF(ttestBF(diff)), digits1),
                                       d    = signif(mean(diff)/sd(diff), digits1))

kable(FRSC_data_diagonal_diff_stats)
hemisphere_cortex hemisphere_HC p BF10 d
left left 0.0000442 470 0.59
left right 0.0000030 5600 0.69
right left 0.0001911 120 0.53
right right 0.0000002 65000 0.79

Methods

Task and virtual environment

Trial types of the experiment

Encoding trial

Average trial lengths:

# Subset data to encoding trials only
encoding_only <- OLM_7T_trial_results[OLM_7T_trial_results$trialType == "encoding", ]

# Trial duration = trial end minus trial start
encoding_only$trial_duration <- encoding_only$end_time - encoding_only$start_time

# Per-subject summaries
# NOTE(review): run_length divides the total by 2, i.e. assumes exactly two
# encoding runs per subject - confirm against the acquisition protocol
trial_dur <- ddply(encoding_only, c("subject"), summarise,
                   total_length   = sum(trial_duration),
                   run_length     = total_length/2,
                   trial_duration = mean(trial_duration),
                   N              = length(subject))

# Create report strings (mean and SD across subjects)
str1 <- mean_SD_str2(trial_dur$trial_duration, report_type, digits1, rounding_type)
str2 <- mean_SD_str2(trial_dur$run_length, report_type, digits1, rounding_type)

Average trial duration of an encoding trial: M = 20 (SD = 3.1).

Average encoding run duration: M = 20 (SD = 3.1). (NOTE: this value is identical to the per-trial duration reported above, although the run length was computed separately as `str2` — verify that `str2`, not `str1`, was inserted into this sentence.)

Procedure

Days between sessions

# Load lab notebook with information on each subject
lab_notebook <- read_excel("data/ignore_fMRI_version1/VP00157_PPT_Info_Final.xlsx")

# Subset to only the participants used in this analysis
lab_notebook <- lab_notebook[lab_notebook$`ID Number` %in% subjects, ]

# Warn when no subject IDs match the notebook: an empty subset silently makes
# the reported mean NaN (which is what the rendered output currently shows)
if (nrow(lab_notebook) == 0) {
  warning("No lab-notebook rows matched `subjects` - check the ID formats", call. = FALSE)
}

# Extract time between sessions; coerce to numeric in case the Excel column was
# read as text (entries that cannot be parsed become NA with a warning)
val <- as.numeric(lab_notebook$`Delay between sessions`)

# Create report string
str1   <-  mean_SD_str2(val, report_type, digits1, rounding_type)
  • Number of days between sessions M = NaN (SD = NA)

MRI acquisition

Number of volumes

# Load the .RData with the per-subject volume counts (creates `nVol_mean`)
load("data/ignore_fMRI_version1/extracted_values/SpaNov_nVol_mean.RData")

# Total number of volumes across the two runs
nVol_mean$nVol_total <- rowSums(nVol_mean[, c("nVol1", "nVol2")])

# Convert the total volume count to minutes
# NOTE(review): dividing volumes by 60 assumes a TR of 1 s - confirm with the
# sequence parameters
nVol_mean$dataMinutes <- nVol_mean$nVol_total / 60

# Create report strings (mean and SD across subjects) and the minimum duration
str1    <- mean_SD_str2(nVol_mean$nVol1, report_type, digits1, rounding_type)
str2    <- mean_SD_str2(nVol_mean$nVol2, report_type, digits1, rounding_type)
minTime <- signif(min(nVol_mean$dataMinutes), 3)
  • Number of volumes Run 1: M = 410 (SD = 81)
  • Number of volumes Run 2: M = 370 (SD = 37)
  • Minimum data in minutes: 10.5

Neuroimaging analysis overview

Standard GLMs

Gradient analysis

Hippocampal gradient analysis

Get the number of average voxels per position

# Report string for the average number of voxels per hippocampal gradient position
# NOTE(review): the second argument is the hard-coded literal 1, whereas the other
# mean_SD_str2() calls in this file pass `report_type` - confirm this is intended
voxelNum <- mean_SD_str2(HC_data_agg_pos$n, 1,digits1, rounding_type)

The average number of voxels per position is M = 7.6 (SD = 3)

Supplementary material

Supplementary notes

Supplementary tables

Significant clusters for linear contrast (Level 1 < Level 2 < Level 3 < …)

# Create tables
# Cluster statistics (size, z-value summaries, peak coordinates/regions) for the
# ascending linear contrast
kable(GLM1_cluster1$cluster_values)
part cluster size zValue_mean zValue_median zValue_max zValue_min zValue_peak_abs cluster_mass clsuter_mass_log peak_x peak_y peak_z peak_region peak_region_name num_regions
cortex_left 1 359 3.203255 3.173238 4.6 2.5 4.585031 1100 7.047490 -23.633364 -44.6201248 71.3573380 232 2 7
cortex_left 2 274 2.995573 2.955703 4.0 2.4 4.035157 820 6.710264 -44.840401 -25.9435463 17.5734921 284 RI 8
cortex_left 4 1301 3.642661 3.483272 6.6 2.3 6.552618 4700 8.463603 -5.515844 -16.9644909 58.8415260 220 24dd 16
cortex_left 7 32 2.848919 2.843505 3.2 2.5 3.218965 91 4.512676 -9.722655 -43.1805611 61.3034210 217 5mv 2
cortex_left 11 74 2.933807 2.916898 3.7 2.5 3.720569 220 5.380366 -34.552116 -25.5105019 65.8123550 233 3a 3
cortex_left 12 170 3.315195 3.260149 4.5 2.5 4.487104 560 6.334315 -41.879894 -15.4149284 57.2907143 188 4 6
cortex_left 18 110 3.164698 3.142001 4.4 2.5 4.369443 350 5.852538 -51.698463 -68.2391205 11.5491571 320 TPOJ2 5
cortex_left 19 121 2.856008 2.772685 3.7 2.4 3.700944 350 5.845215 -60.648418 -44.3813210 -1.5860875 310 STSvp 4
cortex_left 20 93 3.001629 2.900192 4.0 2.5 4.023314 280 5.631755 -60.607883 -47.3325958 12.7079782 208 STV 5
cortex_left 21 25 2.904840 2.918153 3.6 2.4 3.621578 73 4.285254 -61.798660 -42.9189110 16.2704659 208 STV 2
cortex_left 23 38 2.938141 2.895385 3.6 2.5 3.638628 110 4.715363 -44.560581 -14.6814651 0.5779716 283 52 3
cortex_left 28 22 2.821405 2.751237 3.5 2.5 3.482579 62 4.128278 -40.958489 7.6166196 4.8948507 294 FOP3 2
cortex_left 33 23 3.012461 3.066713 3.6 2.5 3.574365 69 4.238252 -16.368948 -86.4783249 41.0923004 332 V6A 2
cortex_left 37 167 3.179383 3.117347 4.4 2.5 4.439869 530 6.274681 -51.717174 -66.0832825 29.1244259 330 PGi 2
cortex_left 40 52 2.874302 2.849513 3.3 2.5 3.281207 150 5.007053 -62.635849 -34.6212502 33.1443672 328 PF 2
cortex_left 41 167 3.085788 2.938134 4.3 2.5 4.327454 520 6.244801 -58.049030 10.6251764 15.6536179 258 6r 6
cortex_left 44 121 3.109789 3.074158 4.0 2.4 4.027047 380 5.930345 -8.065454 48.8018227 -11.7797298 245 10r 4
cortex_left 48 180 2.999729 2.896891 4.2 2.4 4.196778 540 6.291479 -56.474571 3.4510653 -16.3791847 308 STSda 7
cortex_left 57 333 3.098199 3.099359 3.9 2.4 3.936117 1000 6.938963 -17.164730 61.4577560 27.5678673 267 9a 7
cortex_left 61 22 3.246963 3.164694 4.1 2.5 4.058548 71 4.268763 -8.394770 51.6081810 7.1561594 249 9m 3
cortex_left 62 30 2.774007 2.705170 3.3 2.5 3.294678 83 4.421490 -34.954205 48.4772034 32.8404121 264 46 1
cortex_left 73 29 2.776174 2.749759 3.4 2.5 3.402303 81 4.388369 -60.065445 -11.2030029 -7.9695344 308 STSda 3
cortex_left 76 26 2.737490 2.698177 3.1 2.5 3.139381 71 4.265138 -61.666889 -17.1937847 -17.1826057 312 TE1a 3
cortex_right 77 38 3.046592 3.028426 3.7 2.5 3.683780 120 4.751610 50.427727 0.0801602 53.1665268 10 FEF 3
cortex_right 78 1648 3.465575 3.388570 5.9 2.4 5.916368 5700 8.650196 13.677155 -16.6584091 75.3533707 55 6mp 19
cortex_right 81 627 3.356290 3.302903 4.8 2.4 4.839956 2100 7.651783 32.501801 -42.9102974 67.2845840 52 2 9
cortex_right 82 69 2.818986 2.861367 3.4 2.4 3.449331 190 5.270484 31.048088 -26.2055817 66.9562302 53 3a 2
cortex_right 83 163 3.013055 2.911259 4.1 2.5 4.138606 490 6.196705 57.860054 -13.1080933 47.0665741 51 1 4
cortex_right 86 50 3.067693 3.041093 3.9 2.4 3.860341 150 5.032949 63.991276 -34.1908493 35.2002335 148 PF 1
cortex_right 87 171 3.196083 3.086569 4.7 2.5 4.676639 550 6.303589 47.416145 -25.7598839 18.3474407 104 RI 6
cortex_right 88 76 2.892311 2.848980 3.9 2.4 3.852891 220 5.392789 57.842972 -34.0697250 -1.2721851 129 STSdp 3
cortex_right 90 70 3.147435 3.169488 4.1 2.5 4.055309 220 5.395083 62.873524 -40.0575027 15.5642920 28 STV 3
cortex_right 92 22 2.675780 2.721737 3.1 2.2 3.102967 59 4.075284 65.307075 -25.7790356 6.8982592 175 A4 2
cortex_right 97 42 2.944008 2.925953 3.5 2.6 3.470099 120 4.817442 44.581242 8.7226772 8.1082582 115 FOP2 2
cortex_right 99 26 2.932007 2.932414 3.4 2.6 3.361173 76 4.333784 41.860241 29.4441929 3.9625542 108 FOP4 2
cortex_right 101 47 3.324561 3.345844 4.4 2.5 4.357726 160 5.051485 18.361319 -84.3934402 42.2968483 152 V6A 3
cortex_right 103 171 3.506797 3.404864 5.5 2.4 5.462230 600 6.396367 50.527477 -72.1769028 9.3044167 23 MT 6
cortex_right 107 20 2.880561 2.799009 3.3 2.6 3.324425 58 4.053717 57.994289 4.6990094 36.6352959 56 6v 2
cortex_right 108 130 2.891829 2.836778 4.3 2.5 4.273050 380 5.929424 63.827797 -20.2718563 28.0065994 147 PFop 3
cortex_right 111 185 3.244620 3.246175 4.6 2.4 4.584883 600 6.397354 58.813469 12.2502441 7.8504372 78 6r 5
cortex_right 112 29 2.961780 2.908350 3.8 2.4 3.758126 86 4.453086 7.906230 48.9586067 -14.1416931 65 10r 3
cortex_right 127 53 2.936314 2.864966 3.7 2.5 3.723906 160 5.047447 8.638423 55.7692184 25.6142464 69 9m 2
cortex_right 128 28 2.968046 2.955293 3.5 2.5 3.506621 83 4.420108 35.940197 50.5307732 30.8277359 84 46 1
cortex_right 130 25 3.053426 3.030692 3.8 2.5 3.842268 76 4.335140 38.981052 17.5954723 -38.0150261 131 TGd 1
cortex_right 135 20 2.751254 2.717484 3.2 2.5 3.240510 55 4.007789 57.022911 5.3255630 -19.5459213 128 STSda 2
subcort 146 123 3.064884 2.939471 4.9 2.4 4.934852 380 5.932194 26.000000 -88.0000000 -40.0000000 371 CEREBELLUM_RIGHT 1
subcort 167 115 2.956257 2.879120 4.0 2.4 3.997005 340 5.828856 18.000000 -8.0000000 -20.0000000 376 HIPPOCAMPUS_RIGHT 2
subcort 168 153 3.077743 3.008770 4.4 2.4 4.441753 470 6.154634 -18.000000 -12.0000000 -16.0000000 367 HIPPOCAMPUS_LEFT 2
subcort 171 26 2.766934 2.705764 3.4 2.5 3.380726 72 4.275836 -26.000000 -6.0000000 -20.0000000 368 AMYGDALA_LEFT 2
subcort 188 20 2.875691 2.869097 3.6 2.5 3.636717 58 4.052025 -16.000000 8.0000000 -8.0000000 364 PUTAMEN_LEFT 1
subcort 193 20 2.780729 2.790532 3.2 2.4 3.186041 56 4.018445 32.000000 2.0000000 -6.0000000 374 PUTAMEN_RIGHT 1
# Region keys and labels contributing to each cluster of the ascending contrast
kable(GLM1_cluster1$cluster_labels)
part cluster regions labels
cortex_left 1 232,219,231,222,229,225,227 2,5L,1,7AL,VIP,7Am,7PC
cortex_left 2 282,285,284,204,205,354,328,281 OP2-3,PFcm,RI,A1,PSL,LBelt,PF,OP1
cortex_left 4 218,221,223,240,239,189,233,188,235,224,216,217,220,206,234,276 23c,24dv,SCEF,p32pr,a24pr,3b,3a,4,6mp,6ma,5m,5mv,24dd,SFL,6d,6a
cortex_left 7 219,217 5L,5mv
cortex_left 11 189,233,188 3b,3a,4
cortex_left 12 188,234,190,189,231,233 4,6d,FEF,3b,1,3a
cortex_left 18 320,319,203,182,321 TPOJ2,TPOJ1,MT,MST,TPOJ3
cortex_left 19 309,305,310,313 STSdp,A5,STSvp,TE1p
cortex_left 20 319,208,305,309,355 TPOJ1,STV,A5,STSdp,A4
cortex_left 21 208,205 STV,PSL
cortex_left 23 283,347,348 52,PoI1,Ig
cortex_left 28 295,294 FOP2,FOP3
cortex_left 33 332,183 V6A,V6
cortex_left 37 330,331 PGi,PGs
cortex_left 40 328,327 PF,PFop
cortex_left 41 293,288,258,279,236,188 FOP1,FOP4,6r,43,6v,4
cortex_left 44 268,245,345,252 10v,10r,s32,10d
cortex_left 48 311,303,287,305,308,356,312 TGd,STGa,TA2,A5,STSda,STSva,TE1a
cortex_left 57 206,250,251,252,249,267,248 SFL,8BL,9p,10d,9m,9a,8Ad
cortex_left 61 244,241,249 p32,a24,9m
cortex_left 62 264 46
cortex_left 73 308,305,356 STSda,A5,STSva
cortex_left 76 356,312,310 STSva,TE1a,STSvp
cortex_right 77 12,8,10 55b,4,FEF
cortex_right 78 38,41,43,60,57,8,55,44,37,39,36,40,52,51,9,54,10,53,96 23c,24dv,SCEF,p32pr,p24pr,4,6mp,6ma,5mv,5ROI,5m,24dd,2,1,3b,6d,FEF,3a,6a
cortex_right 81 39,52,47,51,42,49,45,117,48 5ROI,2,7PC,1,7AROI,VIP,7Am,AIP,LIPv
cortex_right 82 9,53 3b,3a
cortex_right 83 9,51,52,116 3b,1,2,PFt
cortex_right 86 148 PF
cortex_right 87 105,104,25,124,174,101 PFcm,RI,PSROI,PBelt,LBelt,OP1
cortex_right 88 129,139,125 STSdp,TPOJ1,A5
cortex_right 90 139,28,125 TPOJ1,STV,A5
cortex_right 92 125,175 A5,A4
cortex_right 97 115,114 FOP2,FOP3
cortex_right 99 108,169 FOP4,FOP5
cortex_right 101 16,152,3 V7,V6A,V6
cortex_right 103 23,137,157,2,140,141 MT,PHT,FST,MST,TPOJ2,TPOJ3
cortex_right 107 8,56 4,6v
cortex_right 108 105,148,147 PFcm,PF,PFop
cortex_right 111 113,78,99,56,8 FOP1,6r,43,6v,4
cortex_right 112 65,88,165 10r,10v,s32
cortex_right 127 69,87 9m,9a
cortex_right 128 84 46
cortex_right 130 131 TGd
cortex_right 135 123,128 STGa,STSda
subcort 146 371 CEREBELLUM_RIGHT
subcort 167 376,377 HIPPOCAMPUS_RIGHT,AMYGDALA_RIGHT
subcort 168 367,368 HIPPOCAMPUS_LEFT,AMYGDALA_LEFT
subcort 171 368,367 AMYGDALA_LEFT,HIPPOCAMPUS_LEFT
subcort 188 364 PUTAMEN_LEFT
subcort 193 374 PUTAMEN_RIGHT

Significant clusters for linear contrast (Level 1 > Level 2 > Level 3 > …)

# Create tables
# Cluster statistics (size, z-value summaries, peak coordinates/regions) for the
# descending linear contrast
kable(GLM1_cluster2$cluster_values)
part cluster size zValue_mean zValue_median zValue_max zValue_min zValue_peak_abs cluster_mass clsuter_mass_log peak_x peak_y peak_z peak_region peak_region_name num_regions
cortex_left 1 155 -3.029326 -2.885298 -2.4 -4.3 4.339565 470 6.151765 -36.455795 60.54835 7.798744 265 a9-46v 5
cortex_left 2 212 -3.737318 -3.564760 -2.5 -5.6 5.613298 790 6.674954 -2.259454 -31.05509 32.052841 194 RSC 4
cortex_left 3 2335 -4.274944 -4.232842 -2.4 -7.3 7.260047 10000 9.208538 -32.752682 -89.95303 28.335506 326 IP0 31
cortex_left 4 324 -3.542725 -3.471307 -2.4 -5.1 5.081648 1100 7.045640 -27.248730 -53.90340 -16.745516 307 PHA3 11
cortex_left 5 123 -3.319361 -3.299104 -2.5 -4.2 4.229629 410 6.011957 -5.989912 20.90646 53.243435 223 SCEF 4
cortex_left 9 260 -3.695724 -3.601675 -2.4 -5.5 5.487847 960 6.867858 -27.186953 19.61117 58.827522 277 i6-8 8
cortex_left 11 27 -3.256962 -3.161758 -2.5 -4.3 4.260114 88 4.476632 -52.008701 22.97536 5.054845 254 44 1
cortex_left 12 28 -3.008976 -2.935735 -2.5 -3.9 3.921132 84 4.433804 -37.596149 27.65651 -9.035631 291 AVI 2
cortex_left 14 339 -3.412455 -3.436995 -2.5 -4.8 4.797974 1200 7.053432 -47.295277 16.06033 34.457317 260 IFJp 7
cortex_left 30 724 -4.430463 -4.326066 -2.4 -7.3 7.325409 3200 8.073295 -15.719695 -91.85909 -13.344853 184 V2 5
cortex_left 31 23 -2.916152 -2.955783 -2.5 -3.3 3.279240 67 4.205759 -49.620136 -64.41525 -15.030399 318 PH 1
cortex_left 34 72 -2.901869 -2.888328 -2.5 -3.7 3.657163 210 5.342021 -8.872306 -79.71043 11.666974 181 V1 1
cortex_right 37 2700 -4.145337 -3.960887 -2.4 -7.7 7.692244 11000 9.322991 37.719601 -86.56818 25.882921 146 IP0 39
cortex_right 38 1059 -4.315031 -4.131713 -2.4 -7.5 7.472578 4600 8.427185 13.443163 -93.01896 -3.003601 1 V1 14
cortex_right 40 82 -3.265680 -3.300700 -2.5 -4.1 4.115271 270 5.590187 6.666053 24.24194 53.370617 63 8BM 3
cortex_right 43 750 -3.525372 -3.499682 -2.5 -5.1 5.094378 2600 7.880059 48.756924 39.91827 20.827803 81 IFSp 14
cortex_right 51 51 -2.934255 -2.859823 -2.5 -3.6 3.629739 150 5.008279 39.605103 58.47044 9.990405 85 a9-46v 3
cortex_right 71 95 -3.263703 -3.138259 -2.4 -5.2 5.153910 310 5.736739 23.169453 66.53020 2.306681 170 p10p 4
cortex_right 75 32 -3.036814 -3.057288 -2.4 -3.7 3.674551 97 4.576545 26.596565 44.34168 43.048695 68 8Ad 2
subcort 78 216 -3.358756 -3.327735 -2.4 -4.9 4.926994 730 6.586849 -38.000000 -70.00000 -52.000000 361 CEREBELLUM_LEFT 1
subcort 79 365 -3.483206 -3.388930 -2.4 -5.5 5.521689 1300 7.147850 10.000000 -46.00000 -48.000000 371 CEREBELLUM_RIGHT 2
subcort 84 271 -3.421042 -3.258977 -2.5 -5.4 5.363540 930 6.832064 34.000000 -70.00000 -54.000000 371 CEREBELLUM_RIGHT 1
subcort 85 321 -3.375263 -3.306256 -2.4 -5.2 5.169371 1100 6.987914 -24.000000 -32.00000 -42.000000 361 CEREBELLUM_LEFT 2
subcort 92 864 -3.751310 -3.604606 -2.4 -6.8 6.795970 3200 8.083678 8.000000 -78.00000 -24.000000 371 CEREBELLUM_RIGHT 2
subcort 104 310 -3.378930 -3.263888 -2.4 -5.2 5.166298 1000 6.954131 28.000000 -64.00000 -30.000000 371 CEREBELLUM_RIGHT 1
subcort 106 329 -3.516564 -3.398587 -2.4 -5.5 5.502928 1200 7.053542 -26.000000 -60.00000 -32.000000 361 CEREBELLUM_LEFT 1
subcort 154 74 -2.980170 -2.912105 -2.5 -4.0 4.003192 220 5.396045 18.000000 -32.00000 6.000000 372 THALAMUS_RIGHT 1
kable(GLM1_cluster2$cluster_labels)
part cluster regions labels
cortex_left 1 265,351,266,269,350 a9-46v,p47r,9-46d,a10p,p10p
cortex_left 2 194,211,214,212 RSC,POS1,d23ab,23d
cortex_left 3 211,301,207,342,218,297,324,329,230,197,195,322,226,225,209,210,193,186,199,326,323,331,325,196,275,228,338,321,330,184,181 POS1,ProS,PCV,31a,23c,AIP,IP2,PFm,MIP,IPS1,POS2,DVT,7PL,7Am,7Pm,7m,V3A,V4,V3B,IP0,PGp,PGs,IP1,V7,LIPd,LIPv,V3CD,TPOJ3,PGi,V2,V1
cortex_left 4 306,333,301,299,184,340,307,335,334,343,186 PHA1,VMV1,ProS,PreS,V2,VMV2,PHA3,PHA2,VMV3,VVC,V4
cortex_left 5 223,206,243,359 SCEF,SFL,8BM,a32pr
cortex_left 9 190,276,224,278,248,277,247,192 FEF,6a,6ma,s6-8,8Ad,i6-8,8Av,55b
cortex_left 11 254 44
cortex_left 12 291,289 AVI,MI
cortex_left 14 191,253,263,262,261,259,260 PEF,8C,p9-46v,IFSa,IFSp,IFJa,IFJp
cortex_left 30 181,184,185,186,187 V1,V2,V3,V4,V8
cortex_left 31 318 PH
cortex_left 34 181 V1
cortex_right 37 149,14,31,121,34,32,35,162,27,38,144,50,17,13,30,15,142,46,45,152,29,161,6,19,146,143,151,145,16,95,117,48,20,158,159,150,5,1,3 PFm,RSC,POS1,ProS,d23ab,23d,31pv,31a,PCV,23c,IP2,MIP,IPS1,V3A,7m,POS2,DVT,7PROI,7Am,V6A,7Pm,31pd,V4,V3B,IP0,PGp,PGs,IP1,V7,LIPd,AIP,LIPv,LO1,V3CD,LO3,PGi,V3,V1,V6
cortex_right 38 160,153,126,119,4,127,155,154,163,135,1,5,6,7 VMV2,VMV1,PHA1,PreS,V2,PHA3,PHA2,VMV3,VVC,TF,V1,V3,V4,V8
cortex_right 40 63,43,179 8BM,SCEF,a32pr
cortex_right 43 10,96,11,73,83,81,82,78,80,79,68,97,67,12 FEF,6a,PEF,8C,p9-46v,IFSp,IFSa,6r,IFJp,IFJa,8Ad,i6-8,8Av,55b
cortex_right 51 85,86,170 a9-46v,9-46d,p10p
cortex_right 71 170,89,90,72 p10p,a10p,10pp,10d
cortex_right 75 68,84 8Ad,46
subcort 78 361 CEREBELLUM_LEFT
subcort 79 371,366 CEREBELLUM_RIGHT,BRAIN_STEM
subcort 84 371 CEREBELLUM_RIGHT
subcort 85 361,366 CEREBELLUM_LEFT,BRAIN_STEM
subcort 92 361,371 CEREBELLUM_LEFT,CEREBELLUM_RIGHT
subcort 104 371 CEREBELLUM_RIGHT
subcort 106 361 CEREBELLUM_LEFT
subcort 154 372 THALAMUS_RIGHT

Supplementary figures

Locomotion by novelty

# Load the image from the gradient event file creation step; this provides
# `condition_info` (a list of per-condition event tables)
load("ignore_eventTable3/images/SpaNov_event_file_gradients.RData")

# Bind the list of condition tables into one data frame (data.table)
ev_info <- rbindlist(condition_info)

# Subset to the analysis with 6 levels, the encoding runs ("e") and the
# novelty score regressor
ev_info <- ev_info[ev_info$curr_numLvl == 6 & ev_info$runType == "e" & ev_info$type == "noveltyScore", ]

# Average the arcsine-transformed locomotion percentages (translation,
# rotation, stationary) per subject x novelty level, collapsing across runs
ev_info_agg <- ddply(ev_info, c("subj", "level"), summarise, 
                     mean_per_tra_arc = mean(mean_per_tra_arc),
                     mean_per_rot_arc = mean(mean_per_rot_arc),
                     mean_per_sta_arc = mean(mean_per_sta_arc))

# Average per subject x run, collapsing across levels
# NOTE(review): `ev_info_agg1` is not used anywhere in this section —
# presumably consumed elsewhere; confirm before removing
ev_info_agg1 <- ddply(ev_info, c("subj", "run"), summarise, 
                     mean_per_tra_arc = mean(mean_per_tra_arc),
                     mean_per_rot_arc = mean(mean_per_rot_arc),
                     mean_per_sta_arc = mean(mean_per_sta_arc))


# Keep only the extreme novelty levels (lvl1 vs lvl6) for the plots and the
# paired statistics below
ev_info_agg <- ev_info_agg[ev_info_agg$level == "lvl1" | ev_info_agg$level == "lvl6", ]

# Create the plots
## Translation
### Define the y-axis limits/breaks from the data range
axis_y <- calc_axis_limits(ev_info_agg$mean_per_tra_arc, axisExpand)

### Plotting: per-subject lines + points, with boxplots per novelty level.
### NOTE: `linewidth` replaces the deprecated `size` aesthetic for line-based
### geoms (ggplot2 >= 3.4.0); this file already uses `linewidth` elsewhere.
p1 <- ggplot(ev_info_agg, aes(x = level, y = mean_per_tra_arc, fill = level)) +
  geom_line(aes(group = subj), linewidth = 0.05) +
  geom_point(size = 0.1, colour = boxplot_pointColour) +
  geom_boxplot(colour = boxplot_borderColour, outlier.shape = NA, width = 0.4, linewidth = 0.2, alpha = 0.7) +
  scale_fill_manual(values = baseColours) +
  base_theme + 
  scale_y_continuous(breaks = axis_y$breaks) + 
  coord_cartesian(xlim = c(0.5, 2.5), ylim = axis_y$limits, expand = FALSE) +
  theme(legend.position = "none") +
  labs(title = "Translation", x = "Novelty level", y = "arcsine(percentage)")

## Rotation
### Define the y-axis limits/breaks from the data range
axis_y <- calc_axis_limits(ev_info_agg$mean_per_rot_arc, axisExpand)

### Plotting: same layout as the translation panel.
### NOTE: `linewidth` replaces the deprecated `size` aesthetic for line-based
### geoms (ggplot2 >= 3.4.0); this file already uses `linewidth` elsewhere.
p2 <- ggplot(ev_info_agg, aes(x = level, y = mean_per_rot_arc, fill = level)) +
  geom_line(aes(group = subj), linewidth = 0.05) +
  geom_point(size = 0.1, colour = boxplot_pointColour) +
  geom_boxplot(colour = boxplot_borderColour, outlier.shape = NA, width = 0.4, linewidth = 0.2, alpha = 0.7) +
  scale_fill_manual(values = baseColours) +
  base_theme + 
  scale_y_continuous(breaks = axis_y$breaks) + 
  coord_cartesian(xlim = c(0.5, 2.5), ylim = axis_y$limits, expand = FALSE) +
  theme(legend.position = "none") +
  labs(title = "Rotation", x = "Novelty level", y = "arcsine(percentage)")

## Stationary
### Define the y-axis limits/breaks from the data range
axis_y <- calc_axis_limits(ev_info_agg$mean_per_sta_arc, axisExpand)

### Plotting: same layout as the translation panel.
### NOTE: `linewidth` replaces the deprecated `size` aesthetic for line-based
### geoms (ggplot2 >= 3.4.0); this file already uses `linewidth` elsewhere.
p3 <- ggplot(ev_info_agg, aes(x = level, y = mean_per_sta_arc, fill = level)) +
  geom_line(aes(group = subj), linewidth = 0.05) +
  geom_point(size = 0.1, colour = boxplot_pointColour) +
  geom_boxplot(colour = boxplot_borderColour, outlier.shape = NA, width = 0.4, linewidth = 0.2, alpha = 0.7) +
  scale_fill_manual(values = baseColours) +
  base_theme + 
  scale_y_continuous(breaks = axis_y$breaks) + 
  coord_cartesian(xlim = c(0.5, 2.5), ylim = axis_y$limits, expand = FALSE) +
  theme(legend.position = "none") +
  labs(title = "Stationary", x = "Novelty level", y = "arcsine(percentage)")

# Arrange the three locomotion panels in one row and write the figure to disk
fig_locomotion <- plot_grid(p1, p2, p3, ncol = 3)

ggsave(filename = paste0(figurePath, "Fig_1f_locomotion.png"),
       plot = fig_locomotion,
       dpi = dpi,
       width = double_column/2.5,
       height = double_column/6,
       units = "mm")

# Calculate the test statistics (novelty level 1 vs. level 6, paired by subject)
## Helper: Bayesian one-sample t-test on the paired differences (BF10) and
## Cohen's d (mean/sd of the differences) for one locomotion measure.
## Relies on ev_info_agg being ordered by subj within each level (ddply
## output), so the lvl1 and lvl6 subsets are aligned subject-by-subject.
paired_level_stats <- function(df, column) {
  diff_val <- df[df$level == 'lvl1', column] - df[df$level == 'lvl6', column]
  list(BF = signif(reportBF(ttestBF(diff_val)), digits1),
       d  = signif(mean(diff_val)/sd(diff_val), digits1))
}

## Translation
res <- paired_level_stats(ev_info_agg, "mean_per_tra_arc")
BF1 <- res$BF
d1  <- res$d

## Rotation
res <- paired_level_stats(ev_info_agg, "mean_per_rot_arc")
BF2 <- res$BF
d2  <- res$d

## Stationary
res <- paired_level_stats(ev_info_agg, "mean_per_sta_arc")
BF3 <- res$BF
d3  <- res$d
  • Translation Level 1 vs. Level 6: BF10 = 3.5 × 10^{15}, d = -1.7
  • Rotation Level 1 vs. Level 6: BF10 = 7.3 × 10^{16}, d = 1.9
  • Stationary Level 1 vs. Level 6: BF10 = 9.4 × 10^{8}, d = 1.1

Spatial novelty gradient in posterior medial cortex showed different principal functional connectivity manifold profiles

# Compare the PMC gradient with the first connectivity gradient from Margulies et al. (2016)
## Read the PMC gradient labels from the CIFTI results
PMC_gradient <- read_cifti("cifti_results/SpaNov_gradient_PMC_min.dlabel.nii")

## Attach the per-vertex labels (left hemisphere first, then right) as a new
## column of the Margulies gradient data frame
Margulies_grad$PMC_gradient <- c(PMC_gradient$data$cortex_left,
                                 PMC_gradient$data$cortex_right)

## Restrict to vertices that lie inside the PMC gradient (label 0 = outside)
inside_pmc <- Margulies_grad$PMC_gradient != 0
Margulies_grad_PMC <- Margulies_grad[inside_pmc, ]

## Encode the gradient bin as an ordered factor (levels 1 to 6)
Margulies_grad_PMC$PMC_gradient <- factor(Margulies_grad_PMC$PMC_gradient,
                                          levels = 1:6, ordered = TRUE)


## Create a figure: distribution of the first Margulies connectivity gradient
## value within each spatial novelty-familiarity gradient bin of PMC
Margulies_novelty <- ggplot(Margulies_grad_PMC, aes(x = PMC_gradient, y = Grad_1, fill = PMC_gradient)) +
  geom_jitter(height = 0, size = 0.5, alpha = 0.1, width = 0.2) +
  geom_boxplot(alpha = 0.8, outlier.shape = NA) +
  scale_fill_manual(values = novFam_gradient[-5]) +
  base_theme + 
  theme(legend.position = "none") +
  # Fixed label typos: "familarity" -> "familiarity", "Margulies' et al (2016)" -> "Margulies et al. (2016)"
  labs(x = "Spatial novelty-familiarity gradient (PMC)", y = "Margulies et al. (2016) - 1st gradient", title = "Relation to connectivity gradient")

# Save
ggsave(Margulies_novelty,
       filename = paste0(figurePath, "Fig_S2_ppc_PMC_gradient_Margulies.png"), 
       dpi = dpi,
       width = 90,
       height = 80,
       units = "mm")

Posterior predictive checks for the novelty score model

Find information here: https://mc-stan.org/rstanarm/reference/pp_check.stanreg.html

# Set an extra seed because these plots are relatively variable with regard to
# the tails of the simulated densities
set.seed(20240206)

# Posterior predictive check for the run-1 novelty score model: overlay the
# densities of 100 posterior draws on the observed-data density
p1 <- brms::pp_check(m_noveltyScore_run1, ndraws = 100) + 
    scale_colour_manual(values = c("black", baseColours[1])) +
  coord_cartesian(xlim = c(-20, 20), expand = TRUE) + 
  base_theme + 
  theme(axis.line.y = element_blank(),
        axis.text.y = element_blank(), 
        axis.ticks.y = element_blank(),
        legend.position = "none") # legend is only drawn on the second panel
# Posterior predictive check for the run-2 novelty score model; this panel
# carries the (heavily shrunken) legend for the combined figure
p2 <- brms::pp_check(m_noveltyScore_run2, ndraws = 100) + 
  scale_colour_manual(values = c("black", baseColours[1]), name = "") +
  coord_cartesian(xlim = c(-50, 50), expand = TRUE) + 
  base_theme + 
  theme(axis.line.y = element_blank(),
        axis.text.y = element_blank(), 
        axis.ticks.y = element_blank(),
        legend.key.size = unit(0.5, 'mm'), #change legend key size
        legend.key.height = unit(0.1, 'mm'), #change legend key height
        legend.key.width = unit(1, 'mm'), #change legend key width
        legend.title = element_text(size = 7), #change legend title font size
        legend.text = element_text(size = 3),#change legend text font size
        legend.spacing.y = unit(0, 'mm'),
        legend.position = c(1, 0), # place legend inside panel, bottom-right corner
        legend.justification = c(1, -0.5),
        legend.background = element_blank(),
        legend.margin = margin(unit(0, 'mm'), unit(0, 'mm'), unit(0, 'mm'), unit(0, 'mm')))

# Thin the density lines of both pp_check panels. The aes_params route to
# change a specific geom's linewidth was found here:
# https://stackoverflow.com/questions/55450441/how-to-change-size-from-specific-geom-in-ggplot2
thin_ppc_lines <- function(p, lw = 0.1) {
  p$layers[[1]]$aes_params$linewidth <- lw
  p$layers[[2]]$aes_params$linewidth <- lw
  p
}
p1 <- thin_ppc_lines(p1)
p2 <- thin_ppc_lines(p2)

# Combine the two posterior predictive check panels side by side and save
p_comb <- plot_grid(p1, p2, ncol = 2)

ggsave(filename = paste0(figurePath, "Fig_S3_ppc.png"),
       plot = p_comb,
       dpi = dpi,
       width = double_column/5,
       height = double_column/7,
       units = "mm")